xref: /openbmc/linux/arch/powerpc/kvm/e500_emulate.c (revision 03ab8e6297acd1bc0eedaa050e2a1635c576fd11)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"

#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270

#ifdef CONFIG_KVM_E500MC
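/*
 * Map a doorbell message type (taken from the msgsnd/msgclr payload) to
 * the corresponding booke interrupt priority, or -1 if the type is not
 * one we emulate.
 */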
static int dbell2prio(ulong param)
{
	int msg = param & PPC_DBELL_TYPE_MASK;
	int prio = -1;

	switch (msg) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		prio = BOOKE_IRQPRIO_DBELL;
		break;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		prio = BOOKE_IRQPRIO_DBELL_CRIT;
		break;
	default:
		break;
	}

	return prio;
}

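/* Emulate msgclr: drop any pending doorbell of the requested type. */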
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);

	if (prio < 0)
		return EMULATE_FAIL;

	clear_bit(prio, &vcpu->arch.pending_exceptions);
	return EMULATE_DONE;
}

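/*
 * Emulate msgsnd: raise the doorbell on every vcpu whose PIR matches the
 * payload (or on all vcpus for a broadcast message) and kick it so the
 * interrupt is noticed.
 */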
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);
	int pir = param & PPC_DBELL_PIR_MASK;
	unsigned long i;
	struct kvm_vcpu *cvcpu;

	if (prio < 0)
		return EMULATE_FAIL;

	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
		int cpir = cvcpu->arch.shared->pir;
		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
			set_bit(prio, &cvcpu->arch.pending_exceptions);
			kvm_vcpu_kick(cvcpu);
		}
	}

	return EMULATE_DONE;
}
#endif

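/*
 * Emulate ehpriv: the debug variant exits to userspace as a KVM_EXIT_DEBUG
 * event without advancing the PC; other opcode values are not handled here.
 */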
static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
				   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_oc(inst)) {
	case EHPRIV_OC_DEBUG:
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
		vcpu->run->debug.arch.status = 0;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		emulated = EMULATE_EXIT_USER;
		*advance = 0;
		break;
	default:
		emulated = EMULATE_FAIL;
	}
	return emulated;
}

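/* Emulate dcbtls by always reporting an unsuccessful cache line lock. */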
static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Always fail to lock the cache */
	vcpu_e500->l1csr0 |= L1CSR0_CUL;
	return EMULATE_DONE;
}

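/*
 * Emulate mftmr: only TMCFG0 is handled, advertising a single thread
 * per vcpu.
 */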
static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
				  int rt)
{
	/* Expose one thread per vcpu */
	if (get_tmrn(inst) == TMRN_TMCFG0) {
		kvmppc_set_gpr(vcpu, rt,
			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}

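/*
 * Emulate the e500-specific extended opcodes under primary opcode 31;
 * anything not handled here falls back to the generic booke emulation.
 */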
int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);

	return emulated;
}

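/*
 * Emulate mtspr for the e500-specific SPRs; unknown SPRs fall through to
 * the generic booke mtspr emulation.
 */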
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}

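/*
 * Emulate mfspr for the e500-specific SPRs; unknown SPRs fall through to
 * the generic booke mfspr emulation.
 */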
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}