xref: /openbmc/linux/arch/mips/kernel/mips-mt.c (revision 49a89efb)
1 /*
2  * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
3  * Copyright (C) 2005 Mips Technologies, Inc
4  */
5 
6 #include <linux/device.h>
7 #include <linux/kallsyms.h>
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/interrupt.h>
12 #include <linux/security.h>
13 
14 #include <asm/cpu.h>
15 #include <asm/processor.h>
16 #include <asm/atomic.h>
17 #include <asm/system.h>
18 #include <asm/hardirq.h>
19 #include <asm/mmu_context.h>
20 #include <asm/smp.h>
21 #include <asm/mipsmtregs.h>
22 #include <asm/r4kcache.h>
23 #include <asm/cacheflush.h>
24 
25 int vpelimit;
26 
27 static int __init maxvpes(char *str)
28 {
29 	get_option(&str, &vpelimit);
30 
31 	return 1;
32 }
33 
34 __setup("maxvpes=", maxvpes);
35 
36 int tclimit;
37 
38 static int __init maxtcs(char *str)
39 {
40 	get_option(&str, &tclimit);
41 
42 	return 1;
43 }
44 
45 __setup("maxtcs=", maxtcs);
46 
/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;		/* saved local interrupt state */
	unsigned long vpflags;		/* MVPControl as returned by dvpe() */
	unsigned long mvpconf0;
	int nvpe;			/* number of VPEs in the core */
	int ntc;			/* number of TCs in the core */
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	/* Disable other VPEs with interrupts off so the snapshot is stable */
	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk("   MVPControl Passed: %08lx\n", mvpctl);
	printk("   MVPControl Read: %08lx\n", vpflags);
	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	/* MVPConf0 encodes the highest VPE/TC index, hence the "+ 1" */
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		/*
		 * VPE registers are read through a TC bound to the target
		 * VPE, so find any TC whose TCBind.CurVPE matches.
		 */
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk("  VPE %d\n", i);
				printk("   VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk("   VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk("   VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk("   VPE%d.EPC : %08lx ",
				       i, read_vpe_c0_epc());
				print_symbol("%s\n", read_vpe_c0_epc());
				printk("   VPE%d.Cause : %08lx\n",
				       i, read_vpe_c0_cause());
				printk("   VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself?  */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk("  TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			/* Halt the TC so its state holds still while read */
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk("  TC %d\n", tc);
		}
		printk("   TCStatus : %08lx\n", tcstatval);
		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
		printk("   TCRestart : %08lx ", read_tc_c0_tcrestart());
		print_symbol("%s\n", read_tc_c0_tcrestart());
		printk("   TCHalt : %08lx\n", haltval);
		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
		/* Restart the TC only if it wasn't halted before the dump */
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	/* Re-enable the other VPEs and restore interrupt state */
	evpe(vpflags);
	local_irq_restore(flags);
}
129 
/*
 * Boot-time 34K CPU option overrides, consumed by
 * mips_mt_set_cpuoptions() below.  -1 (or 0 for the flags)
 * means "option not given on the command line".
 */
static int mt_opt_norps = 0;		/* "norps" given (deprecated) */
static int mt_opt_rpsctl = -1;		/* return prediction stack on/off */
static int mt_opt_nblsu = -1;		/* ALU/LSU sync override on/off */
static int mt_opt_forceconfig7 = 0;	/* force Config7 to mt_opt_config7 */
static int mt_opt_config7 = -1;		/* value to force into CP0.Config7 */
135 
136 static int __init rps_disable(char *s)
137 {
138 	mt_opt_norps = 1;
139 	return 1;
140 }
141 __setup("norps", rps_disable);
142 
143 static int __init rpsctl_set(char *str)
144 {
145 	get_option(&str, &mt_opt_rpsctl);
146 	return 1;
147 }
148 __setup("rpsctl=", rpsctl_set);
149 
150 static int __init nblsu_set(char *str)
151 {
152 	get_option(&str, &mt_opt_nblsu);
153 	return 1;
154 }
155 __setup("nblsu=", nblsu_set);
156 
157 static int __init config7_set(char *str)
158 {
159 	get_option(&str, &mt_opt_config7);
160 	mt_opt_forceconfig7 = 1;
161 	return 1;
162 }
163 __setup("config7=", config7_set);
164 
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;	/* single-thread (serialize) I-cache flushes */
int mt_protdflush = 0;	/* single-thread (serialize) D-cache flushes */
int mt_n_iflushes = 1;	/* repeat each I-cache flush this many times */
int mt_n_dflushes = 1;	/* repeat each D-cache flush this many times */
170 
171 static int __init set_protiflush(char *s)
172 {
173 	mt_protiflush = 1;
174 	return 1;
175 }
176 __setup("protiflush", set_protiflush);
177 
178 static int __init set_protdflush(char *s)
179 {
180 	mt_protdflush = 1;
181 	return 1;
182 }
183 __setup("protdflush", set_protdflush);
184 
185 static int __init niflush(char *s)
186 {
187 	get_option(&s, &mt_n_iflushes);
188 	return 1;
189 }
190 __setup("niflush=", niflush);
191 
192 static int __init ndflush(char *s)
193 {
194 	get_option(&s, &mt_n_dflushes);
195 	return 1;
196 }
197 __setup("ndflush=", ndflush);
198 
199 static unsigned int itc_base = 0;
200 
201 static int __init set_itc_base(char *str)
202 {
203 	get_option(&str, &itc_base);
204 	return 1;
205 }
206 
207 __setup("itcbase=", set_itc_base);
208 
209 void mips_mt_set_cpuoptions(void)
210 {
211 	unsigned int oconfig7 = read_c0_config7();
212 	unsigned int nconfig7 = oconfig7;
213 
214 	if (mt_opt_norps) {
215 		printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
216 	}
217 	if (mt_opt_rpsctl >= 0) {
218 		printk("34K return prediction stack override set to %d.\n",
219 			mt_opt_rpsctl);
220 		if (mt_opt_rpsctl)
221 			nconfig7 |= (1 << 2);
222 		else
223 			nconfig7 &= ~(1 << 2);
224 	}
225 	if (mt_opt_nblsu >= 0) {
226 		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
227 		if (mt_opt_nblsu)
228 			nconfig7 |= (1 << 5);
229 		else
230 			nconfig7 &= ~(1 << 5);
231 	}
232 	if (mt_opt_forceconfig7) {
233 		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
234 		nconfig7 = mt_opt_config7;
235 	}
236 	if (oconfig7 != nconfig7) {
237 		__asm__ __volatile("sync");
238 		write_c0_config7(nconfig7);
239 		ehb();
240 		printk("Config7: 0x%08x\n", read_c0_config7());
241 	}
242 
243 	/* Report Cache management debug options */
244 	if (mt_protiflush)
245 		printk("I-cache flushes single-threaded\n");
246 	if (mt_protdflush)
247 		printk("D-cache flushes single-threaded\n");
248 	if (mt_n_iflushes != 1)
249 		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
250 	if (mt_n_dflushes != 1)
251 		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
252 
253 	if (itc_base != 0) {
254 		/*
255 		 * Configure ITC mapping.  This code is very
256 		 * specific to the 34K core family, which uses
257 		 * a special mode bit ("ITC") in the ErrCtl
258 		 * register to enable access to ITC control
259 		 * registers via cache "tag" operations.
260 		 */
261 		unsigned long ectlval;
262 		unsigned long itcblkgrn;
263 
264 		/* ErrCtl register is known as "ecc" to Linux */
265 		ectlval = read_c0_ecc();
266 		write_c0_ecc(ectlval | (0x1 << 26));
267 		ehb();
268 #define INDEX_0 (0x80000000)
269 #define INDEX_8 (0x80000008)
270 		/* Read "cache tag" for Dcache pseudo-index 8 */
271 		cache_op(Index_Load_Tag_D, INDEX_8);
272 		ehb();
273 		itcblkgrn = read_c0_dtaglo();
274 		itcblkgrn &= 0xfffe0000;
275 		/* Set for 128 byte pitch of ITC cells */
276 		itcblkgrn |= 0x00000c00;
277 		/* Stage in Tag register */
278 		write_c0_dtaglo(itcblkgrn);
279 		ehb();
280 		/* Write out to ITU with CACHE op */
281 		cache_op(Index_Store_Tag_D, INDEX_8);
282 		/* Now set base address, and turn ITC on with 0x1 bit */
283 		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
284 		ehb();
285 		/* Write out to ITU with CACHE op */
286 		cache_op(Index_Store_Tag_D, INDEX_0);
287 		write_c0_ecc(ectlval);
288 		ehb();
289 		printk("Mapped %ld ITC cells starting at 0x%08x\n",
290 			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
291 	}
292 }
293 
/*
 * Functions to protect cache flushes from concurrent execution;
 * which mechanism is used depends on the MP software model chosen.
 */
298 
/*
 * Serialize against other threads of execution before a cache flush.
 * Only the SMTC implementation exists so far; under other MP models
 * this is currently a no-op.
 */
void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/* Forward declaration; implemented by the SMTC code */
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
308 
/*
 * Release the exclusion taken by mt_cflush_lockdown() once the cache
 * flush is complete.  Only the SMTC implementation exists so far;
 * under other MP models this is currently a no-op.
 */
void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/* Forward declaration; implemented by the SMTC code */
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
318 
319 struct class *mt_class;
320 
321 static int __init mt_init(void)
322 {
323 	struct class *mtc;
324 
325 	mtc = class_create(THIS_MODULE, "mt");
326 	if (IS_ERR(mtc))
327 		return PTR_ERR(mtc);
328 
329 	mt_class = mtc;
330 
331 	return 0;
332 }
333 
334 subsys_initcall(mt_init);
335