xref: /openbmc/linux/arch/s390/kvm/vsie.c (revision f8b04488b060b155f8b6769aa70412c3630b03f0)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * kvm nested virtualization support for s390x
4   *
5   * Copyright IBM Corp. 2016, 2018
6   *
7   *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
8   */
9  #include <linux/vmalloc.h>
10  #include <linux/kvm_host.h>
11  #include <linux/bug.h>
12  #include <linux/list.h>
13  #include <linux/bitmap.h>
14  #include <linux/sched/signal.h>
15  
16  #include <asm/gmap.h>
17  #include <asm/mmu_context.h>
18  #include <asm/sclp.h>
19  #include <asm/nmi.h>
20  #include <asm/dis.h>
21  #include <asm/fpu/api.h>
22  #include "kvm-s390.h"
23  #include "gaccess.h"
24  
25  struct vsie_page {
26  	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
27  	/*
28  	 * The backup info for machine checks. Ensure it's at
29  	 * the same offset as that in struct sie_page!
30  	 */
31  	struct mcck_volatile_info mcck_info;    /* 0x0200 */
32  	/*
33  	 * The pinned original scb. Be aware that other VCPUs can modify
34  	 * it while we read from it. Values that are used for conditions or
35  	 * are reused conditionally should be accessed via READ_ONCE.
36  	 */
37  	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
38  	/* the shadow gmap in use by the vsie_page */
39  	struct gmap *gmap;			/* 0x0220 */
40  	/* address of the last reported fault to guest2 */
41  	unsigned long fault_addr;		/* 0x0228 */
42  	/* calculated guest addresses of satellite control blocks */
43  	gpa_t sca_gpa;				/* 0x0230 */
44  	gpa_t itdba_gpa;			/* 0x0238 */
45  	gpa_t gvrd_gpa;				/* 0x0240 */
46  	gpa_t riccbd_gpa;			/* 0x0248 */
47  	gpa_t sdnx_gpa;				/* 0x0250 */
48  	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
49  	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
50  	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
51  };
52  
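/*
 * The offset comments in struct vsie_page above are load-bearing:
 * mcck_info must sit at the same offset as in struct sie_page (see the
 * comment inside the struct). Compile-time checks in this style would
 * catch layout drift (a sketch, not present in the file, which only
 * asserts the total size in kvm_s390_handle_vsie()):
 *
 *	BUILD_BUG_ON(offsetof(struct vsie_page, mcck_info) != 0x0200);
 *	BUILD_BUG_ON(offsetof(struct vsie_page, crycb) != 0x0700);
 */
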
53  /* trigger a validity icpt for the given scb */
54  static int set_validity_icpt(struct kvm_s390_sie_block *scb,
55  			     __u16 reason_code)
56  {
57  	scb->ipa = 0x1000;
58  	scb->ipb = ((__u32) reason_code) << 16;
59  	scb->icptcode = ICPT_VALIDITY;
60  	return 1;
61  }
62  
63  /* mark the prefix as unmapped, this will block the VSIE */
64  static void prefix_unmapped(struct vsie_page *vsie_page)
65  {
66  	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
67  }
68  
69  /* mark the prefix as unmapped and wait until the VSIE has been left */
70  static void prefix_unmapped_sync(struct vsie_page *vsie_page)
71  {
72  	prefix_unmapped(vsie_page);
73  	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
74  		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
75  	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
76  		cpu_relax();
77  }
78  
79  /* mark the prefix as mapped, this will allow the VSIE to run */
80  static void prefix_mapped(struct vsie_page *vsie_page)
81  {
82  	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
83  }
84  
85  /* test if the prefix is mapped into the gmap shadow */
86  static int prefix_is_mapped(struct vsie_page *vsie_page)
87  {
88  	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
89  }
90  
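/*
 * The four helpers above implement a lock-free handshake with the SIE:
 * PROG_REQUEST in prog20 blocks (re-)entry into the vSIE, while
 * PROG_IN_SIE in prog0c is set/cleared around the SIE instruction by the
 * sie64a entry code and indicates that a CPU is currently executing the
 * shadow scb. A typical invalidation sequence built from them (see
 * kvm_s390_vsie_gmap_notifier() below) looks like:
 *
 *	prefix_unmapped_sync(vsie_page);   <- block + kick + wait for exit
 *	   ... invalidate the shadow mapping of the prefix pages ...
 *	   (re-entry stays blocked until map_prefix() calls prefix_mapped())
 */
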
91  /* copy the updated intervention request bits into the shadow scb */
92  static void update_intervention_requests(struct vsie_page *vsie_page)
93  {
94  	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
95  	int cpuflags;
96  
97  	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
98  	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
99  	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
100  }
101  
102  /* shadow (filter and validate) the cpuflags */
103  static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
104  {
105  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
106  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
107  	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
108  
109  	/* we don't allow ESA/390 guests */
110  	if (!(cpuflags & CPUSTAT_ZARCH))
111  		return set_validity_icpt(scb_s, 0x0001U);
112  
113  	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
114  		return set_validity_icpt(scb_s, 0x0001U);
115  	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
116  		return set_validity_icpt(scb_s, 0x0007U);
117  
118  	/* intervention requests will be set later */
119  	newflags = CPUSTAT_ZARCH;
120  	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
121  		newflags |= CPUSTAT_GED;
122  	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
123  		if (cpuflags & CPUSTAT_GED)
124  			return set_validity_icpt(scb_s, 0x0001U);
125  		newflags |= CPUSTAT_GED2;
126  	}
127  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
128  		newflags |= cpuflags & CPUSTAT_P;
129  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
130  		newflags |= cpuflags & CPUSTAT_SM;
131  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
132  		newflags |= cpuflags & CPUSTAT_IBS;
133  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
134  		newflags |= cpuflags & CPUSTAT_KSS;
135  
136  	atomic_set(&scb_s->cpuflags, newflags);
137  	return 0;
138  }
139  /* Copy to APCB FORMAT1 from APCB FORMAT0 */
140  static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
141  			unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
142  {
143  	struct kvm_s390_apcb0 tmp;
144  	unsigned long apcb_gpa;
145  
146  	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
147  
148  	if (read_guest_real(vcpu, apcb_gpa, &tmp,
149  			    sizeof(struct kvm_s390_apcb0)))
150  		return -EFAULT;
151  
152  	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
153  	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
154  	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
155  
156  	return 0;
157  
158  }
159  
160  /**
161   * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
162   * @vcpu: pointer to the virtual CPU
163   * @apcb_s: pointer to start of apcb in the shadow crycb
164   * @crycb_gpa: guest physical address to start of original guest crycb
165   * @apcb_h: pointer to start of apcb in guest 1
166   *
167   * Returns 0 on success, -EFAULT on error reading the guest apcb
168   */
169  static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
170  			unsigned long crycb_gpa, unsigned long *apcb_h)
171  {
172  	unsigned long apcb_gpa;
173  
174  	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
175  
176  	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
177  			    sizeof(struct kvm_s390_apcb0)))
178  		return -EFAULT;
179  
180  	bitmap_and(apcb_s, apcb_s, apcb_h,
181  		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
182  
183  	return 0;
184  }
185  
186  /**
187   * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
188   * @vcpu: pointer to the virtual CPU
189   * @apcb_s: pointer to start of apcb in the shadow crycb
190   * @crycb_gpa: guest physical address to start of original guest crycb
191   * @apcb_h: pointer to start of apcb in the host
192   *
193   * Returns 0 on success, -EFAULT on error reading the guest apcb
194   */
195  static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
196  			unsigned long crycb_gpa,
197  			unsigned long *apcb_h)
198  {
199  	unsigned long apcb_gpa;
200  
201  	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
202  
203  	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
204  			    sizeof(struct kvm_s390_apcb1)))
205  		return -EFAULT;
206  
207  	bitmap_and(apcb_s, apcb_s, apcb_h,
208  		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
209  
210  	return 0;
211  }
212  
213  /**
214   * setup_apcb - Create a shadow copy of the apcb.
215   * @vcpu: pointer to the virtual CPU
216   * @crycb_s: pointer to shadow crycb
217   * @crycb_gpa: guest physical address of original guest crycb
218   * @crycb_h: pointer to the host crycb
219   * @fmt_o: format of the original guest crycb.
220   * @fmt_h: format of the host crycb.
221   *
222   * Checks the compatibility between the guest and host crycb and calls the
223   * appropriate copy function.
224   *
225   * Returns 0 on success, or an error code if the guest and host crycb are incompatible.
226   */
227  static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
228  	       const u32 crycb_gpa,
229  	       struct kvm_s390_crypto_cb *crycb_h,
230  	       int fmt_o, int fmt_h)
231  {
232  	switch (fmt_o) {
233  	case CRYCB_FORMAT2:
234  		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
235  			return -EACCES;
236  		if (fmt_h != CRYCB_FORMAT2)
237  			return -EINVAL;
238  		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
239  				    crycb_gpa,
240  				    (unsigned long *)&crycb_h->apcb1);
241  	case CRYCB_FORMAT1:
242  		switch (fmt_h) {
243  		case CRYCB_FORMAT2:
244  			return setup_apcb10(vcpu, &crycb_s->apcb1,
245  					    crycb_gpa,
246  					    &crycb_h->apcb1);
247  		case CRYCB_FORMAT1:
248  			return setup_apcb00(vcpu,
249  					    (unsigned long *) &crycb_s->apcb0,
250  					    crycb_gpa,
251  					    (unsigned long *) &crycb_h->apcb0);
252  		}
253  		break;
254  	case CRYCB_FORMAT0:
255  		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
256  			return -EACCES;
257  
258  		switch (fmt_h) {
259  		case CRYCB_FORMAT2:
260  			return setup_apcb10(vcpu, &crycb_s->apcb1,
261  					    crycb_gpa,
262  					    &crycb_h->apcb1);
263  		case CRYCB_FORMAT1:
264  		case CRYCB_FORMAT0:
265  			return setup_apcb00(vcpu,
266  					    (unsigned long *) &crycb_s->apcb0,
267  					    crycb_gpa,
268  					    (unsigned long *) &crycb_h->apcb0);
269  		}
270  	}
271  	return -EINVAL;
272  }
273  
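/*
 * Summary of the format dispatch in setup_apcb() above (derived from the
 * switch statement):
 *
 *	guest (fmt_o)	host (fmt_h)	action
 *	FORMAT2		FORMAT2		setup_apcb11()
 *	FORMAT2		other		-EINVAL
 *	FORMAT1		FORMAT2		setup_apcb10()
 *	FORMAT1		FORMAT1		setup_apcb00()
 *	FORMAT0		FORMAT2		setup_apcb10()
 *	FORMAT0		FORMAT1/0	setup_apcb00()
 *
 * For FORMAT2 and FORMAT0 guest crycbs, -EACCES is returned first if the
 * relevant part of the crycb (256 resp. 32 bytes) would cross a page
 * boundary; every combination not listed falls through to -EINVAL.
 */
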
274  /**
275   * shadow_crycb - Create a shadow copy of the crycb block
276   * @vcpu: a pointer to the virtual CPU
277   * @vsie_page: a pointer to internal data used for the vSIE
278   *
279   * Create a shadow copy of the crycb block and setup key wrapping, if
280   * requested for guest 3 and enabled for guest 2.
281   *
282   * We accept format-1 or format-2, but we convert format-1 into format-2
283   * in the shadow CRYCB.
284   * Using format-2 enables the firmware to choose the right format when
285   * scheduling the SIE.
286   * There is nothing to do for format-0.
287   *
288   * This function centralizes the issuing of set_validity_icpt() for all
289   * the subfunctions working on the crycb.
290   *
291   * Returns: - 0 if shadowed or nothing to do
292   *          - > 0 if control has to be given to guest 2
293   */
294  static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
295  {
296  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
297  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
298  	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
299  	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
300  	unsigned long *b1, *b2;
301  	u8 ecb3_flags;
302  	u32 ecd_flags;
303  	int apie_h;
304  	int apie_s;
305  	int key_msk = test_kvm_facility(vcpu->kvm, 76);
306  	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
307  	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
308  	int ret = 0;
309  
310  	scb_s->crycbd = 0;
311  
312  	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
313  	apie_s = apie_h & scb_o->eca;
314  	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
315  		return 0;
316  
317  	if (!crycb_addr)
318  		return set_validity_icpt(scb_s, 0x0039U);
319  
320  	if (fmt_o == CRYCB_FORMAT1)
321  		if ((crycb_addr & PAGE_MASK) !=
322  		    ((crycb_addr + 128) & PAGE_MASK))
323  			return set_validity_icpt(scb_s, 0x003CU);
324  
325  	if (apie_s) {
326  		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
327  				 vcpu->kvm->arch.crypto.crycb,
328  				 fmt_o, fmt_h);
329  		if (ret)
330  			goto end;
331  		scb_s->eca |= scb_o->eca & ECA_APIE;
332  	}
333  
334  	/* we may only allow it if enabled for guest 2 */
335  	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
336  		     (ECB3_AES | ECB3_DEA);
337  	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
338  	if (!ecb3_flags && !ecd_flags)
339  		goto end;
340  
341  	/* copy only the wrapping keys */
342  	if (read_guest_real(vcpu, crycb_addr + 72,
343  			    vsie_page->crycb.dea_wrapping_key_mask, 56))
344  		return set_validity_icpt(scb_s, 0x0035U);
345  
346  	scb_s->ecb3 |= ecb3_flags;
347  	scb_s->ecd |= ecd_flags;
348  
349  	/* xor both blocks in one run */
350  	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
351  	b2 = (unsigned long *)
352  			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
353  	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
354  	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
355  end:
356  	switch (ret) {
357  	case -EINVAL:
358  		return set_validity_icpt(scb_s, 0x0022U);
359  	case -EFAULT:
360  		return set_validity_icpt(scb_s, 0x0035U);
361  	case -EACCES:
362  		return set_validity_icpt(scb_s, 0x003CU);
363  	}
364  	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
365  	return 0;
366  }
367  
368  /* shadow (round up/down) the ibc to avoid validity icpt */
369  static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
370  {
371  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
372  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
373  	/* READ_ONCE does not work on bitfields - use a temporary variable */
374  	const uint32_t __new_ibc = scb_o->ibc;
375  	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
376  	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
377  
378  	scb_s->ibc = 0;
379  	/* ibc installed in g2 and requested for g3 */
380  	if (vcpu->kvm->arch.model.ibc && new_ibc) {
381  		scb_s->ibc = new_ibc;
382  		/* take care of the minimum ibc level of the machine */
383  		if (scb_s->ibc < min_ibc)
384  			scb_s->ibc = min_ibc;
385  		/* take care of the maximum ibc level set for the guest */
386  		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
387  			scb_s->ibc = vcpu->kvm->arch.model.ibc;
388  	}
389  }
390  
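/*
 * Worked example for the clamping above (values are made up): with a
 * machine minimum of min_ibc = 0x0b0 and a guest 2 model limit of
 * vcpu->kvm->arch.model.ibc = 0x123, a guest 3 request of 0x090 is
 * raised to 0x0b0, a request of 0xfff is lowered to 0x123, and a
 * request of 0x100 passes through unchanged.
 */
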
391  /* unshadow the scb, copying parameters back to the real scb */
392  static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
393  {
394  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
395  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
396  
397  	/* interception */
398  	scb_o->icptcode = scb_s->icptcode;
399  	scb_o->icptstatus = scb_s->icptstatus;
400  	scb_o->ipa = scb_s->ipa;
401  	scb_o->ipb = scb_s->ipb;
402  	scb_o->gbea = scb_s->gbea;
403  
404  	/* timer */
405  	scb_o->cputm = scb_s->cputm;
406  	scb_o->ckc = scb_s->ckc;
407  	scb_o->todpr = scb_s->todpr;
408  
409  	/* guest state */
410  	scb_o->gpsw = scb_s->gpsw;
411  	scb_o->gg14 = scb_s->gg14;
412  	scb_o->gg15 = scb_s->gg15;
413  	memcpy(scb_o->gcr, scb_s->gcr, 128);
414  	scb_o->pp = scb_s->pp;
415  
416  	/* branch prediction */
417  	if (test_kvm_facility(vcpu->kvm, 82)) {
418  		scb_o->fpf &= ~FPF_BPBC;
419  		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
420  	}
421  
422  	/* interrupt intercept */
423  	switch (scb_s->icptcode) {
424  	case ICPT_PROGI:
425  	case ICPT_INSTPROGI:
426  	case ICPT_EXTINT:
427  		memcpy((void *)((u64)scb_o + 0xc0),
428  		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
429  		break;
430  	}
431  
432  	if (scb_s->ihcpu != 0xffffU)
433  		scb_o->ihcpu = scb_s->ihcpu;
434  }
435  
436  /*
437   * Set up the shadow scb by copying and checking the relevant parts of the g2
438   * provided scb.
439   *
440   * Returns: - 0 if the scb has been shadowed
441   *          - > 0 if control has to be given to guest 2
442   */
443  static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
444  {
445  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
446  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
447  	/* READ_ONCE does not work on bitfields - use a temporary variable */
448  	const uint32_t __new_prefix = scb_o->prefix;
449  	const uint32_t new_prefix = READ_ONCE(__new_prefix);
450  	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
451  	bool had_tx = scb_s->ecb & ECB_TE;
452  	unsigned long new_mso = 0;
453  	int rc;
454  
455  	/* make sure we don't have any leftovers when reusing the scb */
456  	scb_s->icptcode = 0;
457  	scb_s->eca = 0;
458  	scb_s->ecb = 0;
459  	scb_s->ecb2 = 0;
460  	scb_s->ecb3 = 0;
461  	scb_s->ecd = 0;
462  	scb_s->fac = 0;
463  	scb_s->fpf = 0;
464  
465  	rc = prepare_cpuflags(vcpu, vsie_page);
466  	if (rc)
467  		goto out;
468  
469  	/* timer */
470  	scb_s->cputm = scb_o->cputm;
471  	scb_s->ckc = scb_o->ckc;
472  	scb_s->todpr = scb_o->todpr;
473  	scb_s->epoch = scb_o->epoch;
474  
475  	/* guest state */
476  	scb_s->gpsw = scb_o->gpsw;
477  	scb_s->gg14 = scb_o->gg14;
478  	scb_s->gg15 = scb_o->gg15;
479  	memcpy(scb_s->gcr, scb_o->gcr, 128);
480  	scb_s->pp = scb_o->pp;
481  
482  	/* interception / execution handling */
483  	scb_s->gbea = scb_o->gbea;
484  	scb_s->lctl = scb_o->lctl;
485  	scb_s->svcc = scb_o->svcc;
486  	scb_s->ictl = scb_o->ictl;
487  	/*
488  	 * SKEY handling functions can't deal with false setting of PTE invalid
489  	 * bits. Therefore we cannot provide interpretation and would later
490  	 * have to provide our own emulation handlers.
491  	 */
492  	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
493  		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
494  
495  	scb_s->icpua = scb_o->icpua;
496  
497  	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
498  		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
499  	/* if the hva of the prefix changes, we have to remap the prefix */
500  	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
501  		prefix_unmapped(vsie_page);
502  	/* SIE will do mso/msl validity and exception checks for us */
503  	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
504  	scb_s->mso = new_mso;
505  	scb_s->prefix = new_prefix;
506  
507  	/* We definitely have to flush the TLB if this scb never ran */
508  	if (scb_s->ihcpu != 0xffffU)
509  		scb_s->ihcpu = scb_o->ihcpu;
510  
511  	/* MVPG and Protection Exception Interpretation are always available */
512  	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
513  	/* Host-protection-interruption introduced with ESOP */
514  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
515  		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
516  	/*
517  	 * CPU Topology
518  	 * This facility only uses the utility field of the SCA and none of
519  	 * the cpu entries that are problematic with the other interpretation
520  	 * facilities so we can pass it through
521  	 * facilities, so we can pass it through.
522  	if (test_kvm_facility(vcpu->kvm, 11))
523  		scb_s->ecb |= scb_o->ecb & ECB_PTF;
524  	/* transactional execution */
525  	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
526  		/* remap the prefix if tx is toggled on */
527  		if (!had_tx)
528  			prefix_unmapped(vsie_page);
529  		scb_s->ecb |= ECB_TE;
530  	}
531  	/* specification exception interpretation */
532  	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
533  	/* branch prediction */
534  	if (test_kvm_facility(vcpu->kvm, 82))
535  		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
536  	/* SIMD */
537  	if (test_kvm_facility(vcpu->kvm, 129)) {
538  		scb_s->eca |= scb_o->eca & ECA_VX;
539  		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
540  	}
541  	/* Run-time-Instrumentation */
542  	if (test_kvm_facility(vcpu->kvm, 64))
543  		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
544  	/* Instruction Execution Prevention */
545  	if (test_kvm_facility(vcpu->kvm, 130))
546  		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
547  	/* Guarded Storage */
548  	if (test_kvm_facility(vcpu->kvm, 133)) {
549  		scb_s->ecb |= scb_o->ecb & ECB_GS;
550  		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
551  	}
552  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
553  		scb_s->eca |= scb_o->eca & ECA_SII;
554  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
555  		scb_s->eca |= scb_o->eca & ECA_IB;
556  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
557  		scb_s->eca |= scb_o->eca & ECA_CEI;
558  	/* Epoch Extension */
559  	if (test_kvm_facility(vcpu->kvm, 139)) {
560  		scb_s->ecd |= scb_o->ecd & ECD_MEF;
561  		scb_s->epdx = scb_o->epdx;
562  	}
563  
564  	/* etoken */
565  	if (test_kvm_facility(vcpu->kvm, 156))
566  		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
567  
568  	scb_s->hpid = HPID_VSIE;
569  	scb_s->cpnc = scb_o->cpnc;
570  
571  	prepare_ibc(vcpu, vsie_page);
572  	rc = shadow_crycb(vcpu, vsie_page);
573  out:
574  	if (rc)
575  		unshadow_scb(vcpu, vsie_page);
576  	return rc;
577  }
578  
579  void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
580  				 unsigned long end)
581  {
582  	struct kvm *kvm = gmap->private;
583  	struct vsie_page *cur;
584  	unsigned long prefix;
585  	struct page *page;
586  	int i;
587  
588  	if (!gmap_is_shadow(gmap))
589  		return;
590  	/*
591  	 * Only new shadow blocks are added to the list during runtime,
592  	 * therefore we can safely reference them all the time.
593  	 */
594  	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
595  		page = READ_ONCE(kvm->arch.vsie.pages[i]);
596  		if (!page)
597  			continue;
598  		cur = page_to_virt(page);
599  		if (READ_ONCE(cur->gmap) != gmap)
600  			continue;
601  		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
602  		/* with mso/msl, the prefix lies at an offset */
603  		prefix += cur->scb_s.mso;
604  		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
605  			prefix_unmapped_sync(cur);
606  	}
607  }
608  
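/*
 * The range check above is a plain interval-overlap test: the shadow
 * prefix area spans two pages, [prefix, prefix + 2 * PAGE_SIZE - 1], and
 * any unmapped range [start, end] intersecting it forces
 * prefix_unmapped_sync(). Example with made-up numbers, mso = 0x40000000
 * and a guest prefix of 0x2000: the window is 0x40002000..0x40003fff, so
 * an unmap of 0x40003000..0x40003fff triggers the sync, while
 * 0x40004000..0x40004fff does not.
 */
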
609  /*
610   * Map the first prefix page and, if tx is enabled, also the second prefix page.
611   *
612   * The prefix will be protected, a gmap notifier will inform about unmaps.
613   * The shadow scb must not be executed until the prefix is remapped, this is
614   * guaranteed by properly handling PROG_REQUEST.
615   *
616   * Returns: - 0 if successfully mapped or already mapped
617   *          - > 0 if control has to be given to guest 2
618   *          - -EAGAIN if the caller can retry immediately
619   *          - -ENOMEM if out of memory
620   */
621  static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
622  {
623  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
624  	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
625  	int rc;
626  
627  	if (prefix_is_mapped(vsie_page))
628  		return 0;
629  
630  	/* mark it as mapped so we can catch any concurrent unmappers */
631  	prefix_mapped(vsie_page);
632  
633  	/* with mso/msl, the prefix lies at offset *mso* */
634  	prefix += scb_s->mso;
635  
636  	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
637  	if (!rc && (scb_s->ecb & ECB_TE))
638  		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
639  					   prefix + PAGE_SIZE, NULL);
640  	/*
641  	 * We don't have to mprotect, as we will be called for all unshadows.
642  	 * SIE will detect if protection applies and trigger a validity.
643  	 */
644  	if (rc)
645  		prefix_unmapped(vsie_page);
646  	if (rc > 0 || rc == -EFAULT)
647  		rc = set_validity_icpt(scb_s, 0x0037U);
648  	return rc;
649  }
650  
651  /*
652   * Pin the guest page given by gpa and set hpa to the pinned host address.
653   * Will always be pinned writable.
654   *
655   * Returns: - 0 on success
656   *          - -EINVAL if the gpa is not valid guest storage
657   */
658  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
659  {
660  	struct page *page;
661  
662  	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
663  	if (is_error_page(page))
664  		return -EINVAL;
665  	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
666  	return 0;
667  }
668  
669  /* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
670  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
671  {
672  	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
673  	/* always mark the page as dirty for migration */
674  	mark_page_dirty(kvm, gpa_to_gfn(gpa));
675  }
676  
677  /* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
678  static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
679  {
680  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
681  	hpa_t hpa;
682  
683  	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
684  	if (hpa) {
685  		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
686  		vsie_page->sca_gpa = 0;
687  		scb_s->scaol = 0;
688  		scb_s->scaoh = 0;
689  	}
690  
691  	hpa = scb_s->itdba;
692  	if (hpa) {
693  		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
694  		vsie_page->itdba_gpa = 0;
695  		scb_s->itdba = 0;
696  	}
697  
698  	hpa = scb_s->gvrd;
699  	if (hpa) {
700  		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
701  		vsie_page->gvrd_gpa = 0;
702  		scb_s->gvrd = 0;
703  	}
704  
705  	hpa = scb_s->riccbd;
706  	if (hpa) {
707  		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
708  		vsie_page->riccbd_gpa = 0;
709  		scb_s->riccbd = 0;
710  	}
711  
712  	hpa = scb_s->sdnxo;
713  	if (hpa) {
714  		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
715  		vsie_page->sdnx_gpa = 0;
716  		scb_s->sdnxo = 0;
717  	}
718  }
719  
720  /*
721   * Instead of shadowing some blocks, we can simply forward them because the
722   * addresses in the scb are 64 bit long.
723   *
724   * This works as long as the data lies in one page. If blocks ever exceed one
725   * page, we have to fall back to shadowing.
726   *
727   * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
728   * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
729   *
730   * Returns: - 0 if all blocks were pinned.
731   *          - > 0 if control has to be given to guest 2
732   *          - -ENOMEM if out of memory
733   */
734  static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
735  {
736  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
737  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
738  	hpa_t hpa;
739  	gpa_t gpa;
740  	int rc = 0;
741  
742  	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
743  	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
744  		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
745  	if (gpa) {
746  		if (gpa < 2 * PAGE_SIZE)
747  			rc = set_validity_icpt(scb_s, 0x0038U);
748  		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
749  			rc = set_validity_icpt(scb_s, 0x0011U);
750  		else if ((gpa & PAGE_MASK) !=
751  			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
752  			rc = set_validity_icpt(scb_s, 0x003bU);
753  		if (!rc) {
754  			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
755  			if (rc)
756  				rc = set_validity_icpt(scb_s, 0x0034U);
757  		}
758  		if (rc)
759  			goto unpin;
760  		vsie_page->sca_gpa = gpa;
761  		scb_s->scaoh = (u32)((u64)hpa >> 32);
762  		scb_s->scaol = (u32)(u64)hpa;
763  	}
764  
765  	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
766  	if (gpa && (scb_s->ecb & ECB_TE)) {
767  		if (gpa < 2 * PAGE_SIZE) {
768  			rc = set_validity_icpt(scb_s, 0x0080U);
769  			goto unpin;
770  		}
771  		/* 256 bytes cannot cross page boundaries */
772  		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
773  		if (rc) {
774  			rc = set_validity_icpt(scb_s, 0x0080U);
775  			goto unpin;
776  		}
777  		vsie_page->itdba_gpa = gpa;
778  		scb_s->itdba = hpa;
779  	}
780  
781  	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
782  	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
783  		if (gpa < 2 * PAGE_SIZE) {
784  			rc = set_validity_icpt(scb_s, 0x1310U);
785  			goto unpin;
786  		}
787  		/*
788  		 * 512 bytes of vector registers cannot cross page boundaries;
789  		 * if this block gets bigger, we have to shadow it.
790  		 */
791  		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
792  		if (rc) {
793  			rc = set_validity_icpt(scb_s, 0x1310U);
794  			goto unpin;
795  		}
796  		vsie_page->gvrd_gpa = gpa;
797  		scb_s->gvrd = hpa;
798  	}
799  
800  	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
801  	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
802  		if (gpa < 2 * PAGE_SIZE) {
803  			rc = set_validity_icpt(scb_s, 0x0043U);
804  			goto unpin;
805  		}
806  		/* 64 bytes cannot cross page boundaries */
807  		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
808  		if (rc) {
809  			rc = set_validity_icpt(scb_s, 0x0043U);
810  			goto unpin;
811  		}
812  		/* Validity 0x0044 will be checked by SIE */
813  		vsie_page->riccbd_gpa = gpa;
814  		scb_s->riccbd = hpa;
815  	}
816  	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
817  	    (scb_s->ecd & ECD_ETOKENF)) {
818  		unsigned long sdnxc;
819  
820  		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
821  		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
822  		if (!gpa || gpa < 2 * PAGE_SIZE) {
823  			rc = set_validity_icpt(scb_s, 0x10b0U);
824  			goto unpin;
825  		}
826  		if (sdnxc < 6 || sdnxc > 12) {
827  			rc = set_validity_icpt(scb_s, 0x10b1U);
828  			goto unpin;
829  		}
830  		if (gpa & ((1 << sdnxc) - 1)) {
831  			rc = set_validity_icpt(scb_s, 0x10b2U);
832  			goto unpin;
833  		}
834  		/* Due to alignment rules (checked above), this cannot
835  		 * cross page boundaries.
836  		 */
837  		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
838  		if (rc) {
839  			rc = set_validity_icpt(scb_s, 0x10b0U);
840  			goto unpin;
841  		}
842  		vsie_page->sdnx_gpa = gpa;
843  		scb_s->sdnxo = hpa | sdnxc;
844  	}
845  	return 0;
846  unpin:
847  	unpin_blocks(vcpu, vsie_page);
848  	return rc;
849  }
850  
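/*
 * Overview of the satellite blocks pinned by pin_blocks() above (derived
 * from the code; the "fits one page" guarantee comes from the alignment
 * implied by the address mask, except for the sca, which is checked
 * explicitly):
 *
 *	block	address mask	size		validity icpt
 *	sca	~0xf		bsca_block	0x0038/0x0011/0x003b/0x0034
 *	itdb	~0xff		256 bytes	0x0080
 *	gvrd	~0x1ff		512 bytes	0x1310
 *	riccb	~0x3f		64 bytes	0x0043
 *	sdnx	~0xf		1 << sdnxc	0x10b0/0x10b1/0x10b2
 */
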
851  /* unpin the scb provided by guest 2, marking it as dirty */
852  static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
853  		      gpa_t gpa)
854  {
855  	hpa_t hpa = (hpa_t) vsie_page->scb_o;
856  
857  	if (hpa)
858  		unpin_guest_page(vcpu->kvm, gpa, hpa);
859  	vsie_page->scb_o = NULL;
860  }
861  
862  /*
863   * Pin the scb at the gpa provided by guest 2 and map it at vsie_page->scb_o.
864   *
865   * Returns: - 0 if the scb was pinned.
866   *          - > 0 if control has to be given to guest 2
867   */
868  static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
869  		   gpa_t gpa)
870  {
871  	hpa_t hpa;
872  	int rc;
873  
874  	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
875  	if (rc) {
876  		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
877  		WARN_ON_ONCE(rc);
878  		return 1;
879  	}
880  	vsie_page->scb_o = phys_to_virt(hpa);
881  	return 0;
882  }
883  
884  /*
885   * Inject a fault into guest 2.
886   *
887   * Returns: - > 0 if control has to be given to guest 2
888   *          - < 0 if an error occurred during injection.
889   */
890  static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
891  			bool write_flag)
892  {
893  	struct kvm_s390_pgm_info pgm = {
894  		.code = code,
895  		.trans_exc_code =
896  			/* 0-51: virtual address */
897  			(vaddr & 0xfffffffffffff000UL) |
898  			/* 52-53: store / fetch */
899  			(((unsigned int) !write_flag) + 1) << 10,
900  			/* 62-63: asce id (always primary == 0) */
901  		.exc_access_id = 0, /* always primary */
902  		.op_access_id = 0, /* not MVPG */
903  	};
904  	int rc;
905  
906  	if (code == PGM_PROTECTION)
907  		pgm.trans_exc_code |= 0x4UL;
908  
909  	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
910  	return rc ? rc : 1;
911  }
912  
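/*
 * Worked example for the trans_exc_code built above: a write fault at
 * vaddr 0x12345678 yields (0x12345000 | 1 << 10) = 0x12345400, a read
 * fault yields 0x12345800, and a PGM_PROTECTION fault additionally sets
 * the 0x4 bit, e.g. 0x12345404 for a protection exception on a write.
 */
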
913  /*
914   * Handle a fault during vsie execution on a gmap shadow.
915   *
916   * Returns: - 0 if the fault was resolved
917   *          - > 0 if control has to be given to guest 2
918   *          - < 0 if an error occurred
919   */
920  static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
921  {
922  	int rc;
923  
924  	if (current->thread.gmap_int_code == PGM_PROTECTION)
925  		/* we can directly forward all protection exceptions */
926  		return inject_fault(vcpu, PGM_PROTECTION,
927  				    current->thread.gmap_addr, 1);
928  
929  	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
930  				   current->thread.gmap_addr, NULL);
931  	if (rc > 0) {
932  		rc = inject_fault(vcpu, rc,
933  				  current->thread.gmap_addr,
934  				  current->thread.gmap_write_flag);
935  		if (rc >= 0)
936  			vsie_page->fault_addr = current->thread.gmap_addr;
937  	}
938  	return rc;
939  }
940  
941  /*
942   * Retry the previous fault that required guest 2 intervention. This avoids
943   * one superfluous SIE re-entry and direct exit.
944   *
945   * Will ignore any errors. The next SIE fault will do proper fault handling.
946   */
947  static void handle_last_fault(struct kvm_vcpu *vcpu,
948  			      struct vsie_page *vsie_page)
949  {
950  	if (vsie_page->fault_addr)
951  		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
952  				      vsie_page->fault_addr, NULL);
953  	vsie_page->fault_addr = 0;
954  }
955  
956  static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
957  {
958  	vsie_page->scb_s.icptcode = 0;
959  }
960  
961  /* rewind the psw and clear the vsie icpt, so we can retry execution */
962  static void retry_vsie_icpt(struct vsie_page *vsie_page)
963  {
964  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
965  	int ilen = insn_length(scb_s->ipa >> 8);
966  
967  	/* take care of EXECUTE instructions */
968  	if (scb_s->icptstatus & 1) {
969  		ilen = (scb_s->icptstatus >> 4) & 0x6;
970  		if (!ilen)
971  			ilen = 4;
972  	}
973  	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
974  	clear_vsie_icpt(vsie_page);
975  }
976  
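/*
 * Example for the rewind above: a 4-byte instruction intercepted with
 * gpsw.addr = 0x1004 is retried from 0x1000 (insn_length() derives the
 * 4 bytes from the opcode in ipa). If the intercepted instruction was
 * the target of an EXECUTE (icptstatus bit 0x01), the PSW has to be
 * rewound past the EXECUTE itself, so the length is taken from
 * (icptstatus >> 4) & 0x6 instead: 2, 4 or 6 bytes, with 0 treated as 4.
 */
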
977  /*
978   * Try to shadow + enable the guest 2 provided facility list.
979   * Retry instruction execution if enabled for and provided by guest 2.
980   *
981   * Returns: - 0 if handled (retry or guest 2 icpt)
982   *          - > 0 if control has to be given to guest 2
983   */
984  static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
985  {
986  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
987  	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
988  
989  	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
990  		retry_vsie_icpt(vsie_page);
991  		if (read_guest_real(vcpu, fac, &vsie_page->fac,
992  				    sizeof(vsie_page->fac)))
993  			return set_validity_icpt(scb_s, 0x1090U);
994  		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
995  	}
996  	return 0;
997  }
998  
999  /*
1000   * Get a register for a nested guest.
1001   * @vcpu: the vcpu of the guest
1002   * @vsie_page: the vsie_page for the nested guest
1003   * @reg: the register number; the upper 4 bits are ignored.
1004   * Returns: the value of the register.
1005   */
1006  static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
1007  {
1008  	/* no need to validate the parameter and/or perform error handling */
1009  	reg &= 0xf;
1010  	switch (reg) {
1011  	case 15:
1012  		return vsie_page->scb_s.gg15;
1013  	case 14:
1014  		return vsie_page->scb_s.gg14;
1015  	default:
1016  		return vcpu->run->s.regs.gprs[reg];
1017  	}
1018  }
1019  
1020  static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1021  {
1022  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1023  	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
1024  	u64 *pei_block = &vsie_page->scb_o->mcic;
1025  	int edat, rc_dest, rc_src;
1026  	union ctlreg0 cr0;
1027  
1028  	cr0.val = vcpu->arch.sie_block->gcr[0];
1029  	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1030  	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
1031  	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
1032  
1033  	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
1034  	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
1035  	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
1036  	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
1037  
1038  	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
1039  	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
1040  	/*
1041  	 * Either everything went well, or something non-critical went wrong
1042  	 * e.g. because of a race. In either case, simply retry.
1043  	 */
1044  	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
1045  		retry_vsie_icpt(vsie_page);
1046  		return -EAGAIN;
1047  	}
1048  	/* Something more serious went wrong, propagate the error */
1049  	if (rc_dest < 0)
1050  		return rc_dest;
1051  	if (rc_src < 0)
1052  		return rc_src;
1053  
1054  	/* The only possible suppressing exception: just deliver it */
1055  	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
1056  		clear_vsie_icpt(vsie_page);
1057  		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
1058  		WARN_ON_ONCE(rc_dest);
1059  		return 1;
1060  	}
1061  
1062  	/*
1063  	 * Forward the PEI intercept to the guest if it was a page fault, or
1064  	 * also for segment and region table faults if EDAT applies.
1065  	 */
1066  	if (edat) {
1067  		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
1068  		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
1069  	} else {
1070  		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
1071  		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
1072  	}
1073  	if (!rc_dest && !rc_src) {
1074  		pei_block[0] = pei_dest;
1075  		pei_block[1] = pei_src;
1076  		return 1;
1077  	}
1078  
1079  	retry_vsie_icpt(vsie_page);
1080  
1081  	/*
1082  	 * The host has edat, and the guest does not, or it was an ASCE type
1083  	 * exception. The host needs to inject the appropriate DAT interrupts
1084  	 * into the guest.
1085  	 */
1086  	if (rc_dest)
1087  		return inject_fault(vcpu, rc_dest, dest, 1);
1088  	return inject_fault(vcpu, rc_src, src, 0);
1089  }
1090  
1091  /*
1092   * Run the vsie on a shadow scb and a shadow gmap, without any further
1093   * sanity checks, handling SIE faults.
1094   *
1095   * Returns: - 0 everything went fine
1096   *          - > 0 if control has to be given to guest 2
1097   *          - < 0 if an error occurred
1098   */
1099  static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1100  	__releases(vcpu->kvm->srcu)
1101  	__acquires(vcpu->kvm->srcu)
1102  {
1103  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1104  	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1105  	int guest_bp_isolation;
1106  	int rc = 0;
1107  
1108  	handle_last_fault(vcpu, vsie_page);
1109  
1110  	kvm_vcpu_srcu_read_unlock(vcpu);
1111  
1112  	/* save current guest state of bp isolation override */
1113  	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1114  
1115  	/*
1116  	 * The guest is running with BPBC, so we have to force it on for our
1117  	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1118  	 * control in the SCB (which the nested guest can modify) is simply
1119  	 * ignored.
1120  	 */
1121  	if (test_kvm_facility(vcpu->kvm, 82) &&
1122  	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1123  		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1124  
1125  	local_irq_disable();
1126  	guest_enter_irqoff();
1127  	local_irq_enable();
1128  
1129  	/*
1130  	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1131  	 * and VCPU requests also hinder the vSIE from running and lead
1132  	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1133  	 * also kick the vSIE.
1134  	 */
1135  	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1136  	barrier();
1137  	if (test_cpu_flag(CIF_FPU))
1138  		load_fpu_regs();
1139  	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1140  		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
1141  	barrier();
1142  	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1143  
1144  	local_irq_disable();
1145  	guest_exit_irqoff();
1146  	local_irq_enable();
1147  
1148  	/* restore guest state for bp isolation override */
1149  	if (!guest_bp_isolation)
1150  		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1151  
1152  	kvm_vcpu_srcu_read_lock(vcpu);
1153  
1154  	if (rc == -EINTR) {
1155  		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1156  		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1157  		return 0;
1158  	}
1159  
1160  	if (rc > 0)
1161  		rc = 0; /* we could still have an icpt */
1162  	else if (rc == -EFAULT)
1163  		return handle_fault(vcpu, vsie_page);
1164  
1165  	switch (scb_s->icptcode) {
1166  	case ICPT_INST:
1167  		if (scb_s->ipa == 0xb2b0)
1168  			rc = handle_stfle(vcpu, vsie_page);
1169  		break;
1170  	case ICPT_STOP:
1171  		/* stop not requested by g2 - must have been a kick */
1172  		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1173  			clear_vsie_icpt(vsie_page);
1174  		break;
1175  	case ICPT_VALIDITY:
1176  		if ((scb_s->ipa & 0xf000) != 0xf000)
1177  			scb_s->ipa += 0x1000;
1178  		break;
1179  	case ICPT_PARTEXEC:
1180  		if (scb_s->ipa == 0xb254)
1181  			rc = vsie_handle_mvpg(vcpu, vsie_page);
1182  		break;
1183  	}
1184  	return rc;
1185  }
1186  
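/*
 * The opcodes special-cased in the interception switch above: 0xb2b0 is
 * STFLE (handled by handle_stfle()) and 0xb254 is MVPG (handled by
 * vsie_handle_mvpg()). All other intercepts are left in the shadow scb
 * and copied back to guest 2 by unshadow_scb().
 */
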
1187  static void release_gmap_shadow(struct vsie_page *vsie_page)
1188  {
1189  	if (vsie_page->gmap)
1190  		gmap_put(vsie_page->gmap);
1191  	WRITE_ONCE(vsie_page->gmap, NULL);
1192  	prefix_unmapped(vsie_page);
1193  }
1194  
1195  static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1196  			       struct vsie_page *vsie_page)
1197  {
1198  	unsigned long asce;
1199  	union ctlreg0 cr0;
1200  	struct gmap *gmap;
1201  	int edat;
1202  
1203  	asce = vcpu->arch.sie_block->gcr[1];
1204  	cr0.val = vcpu->arch.sie_block->gcr[0];
1205  	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1206  	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1207  
1208  	/*
1209  	 * ASCE or EDAT could have changed since the last icpt, or the gmap
1210  	 * we're holding has been unshadowed. If the gmap is still valid,
1211  	 * we can safely reuse it.
1212  	 */
1213  	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
1214  		vcpu->kvm->stat.gmap_shadow_reuse++;
1215  		return 0;
1216  	}
1217  
1218  	/* release the old shadow - if any, and mark the prefix as unmapped */
1219  	release_gmap_shadow(vsie_page);
1220  	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1221  	if (IS_ERR(gmap))
1222  		return PTR_ERR(gmap);
1223  	vcpu->kvm->stat.gmap_shadow_create++;
1224  	WRITE_ONCE(vsie_page->gmap, gmap);
1225  	return 0;
1226  }
1227  
1228  /*
1229   * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1230   */
1231  static void register_shadow_scb(struct kvm_vcpu *vcpu,
1232  				struct vsie_page *vsie_page)
1233  {
1234  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1235  
1236  	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1237  	/*
1238  	 * External calls have to lead to a kick of the vcpu and
1239  	 * therefore of the vsie -> simulate a wait state.
1240  	 */
1241  	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1242  	/*
1243  	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1244  	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1245  	 */
1246  	preempt_disable();
1247  	scb_s->epoch += vcpu->kvm->arch.epoch;
1248  
1249  	if (scb_s->ecd & ECD_MEF) {
1250  		scb_s->epdx += vcpu->kvm->arch.epdx;
1251  		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1252  			scb_s->epdx += 1;
1253  	}
1254  
1255  	preempt_enable();
1256  }
1257  
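/*
 * The multi-epoch carry above mirrors a 128-bit addition: epoch is the
 * low 64 bits, epdx the high part. Example with made-up values: a g3
 * epoch of 0xffffffffffffff00 plus a g2 epoch of 0x200 wraps to 0x100,
 * which is smaller than the g2 epoch, so 1 is carried into epdx.
 */
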
1258  /*
1259   * Unregister a shadow scb from a VCPU.
1260   */
1261  static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1262  {
1263  	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1264  	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1265  }
1266  
1267  /*
1268   * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1269   * prefix pages and faults.
1270   *
1271   * Returns: - 0 if no errors occurred
1272   *          - > 0 if control has to be given to guest 2
1273   *          - -ENOMEM if out of memory
1274   */
1275  static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1276  {
1277  	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1278  	int rc = 0;
1279  
1280  	while (1) {
1281  		rc = acquire_gmap_shadow(vcpu, vsie_page);
1282  		if (!rc)
1283  			rc = map_prefix(vcpu, vsie_page);
1284  		if (!rc) {
1285  			gmap_enable(vsie_page->gmap);
1286  			update_intervention_requests(vsie_page);
1287  			rc = do_vsie_run(vcpu, vsie_page);
1288  			gmap_enable(vcpu->arch.gmap);
1289  		}
1290  		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1291  
1292  		if (rc == -EAGAIN)
1293  			rc = 0;
1294  		if (rc || scb_s->icptcode || signal_pending(current) ||
1295  		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1296  		    kvm_s390_vcpu_sie_inhibited(vcpu))
1297  			break;
1298  		cond_resched();
1299  	}
1300  
1301  	if (rc == -EFAULT) {
1302  		/*
1303  		 * Addressing exceptions are always presented as intercepts.
1304  		 * As addressing exceptions are suppressing and our guest 3 PSW
1305  		 * points at the responsible instruction, we have to
1306  		 * forward the PSW and set the ilc. If we can't read the guest 3
1307  		 * instruction, we can use an arbitrary ilc. Let's always use
1308  		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1309  		 * memory. (we could also fake the shadow so the hardware
1310  		 * handles it).
1311  		 */
1312  		scb_s->icptcode = ICPT_PROGI;
1313  		scb_s->iprcc = PGM_ADDRESSING;
1314  		scb_s->pgmilc = 4;
1315  		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1316  		rc = 1;
1317  	}
1318  	return rc;
1319  }
1320  
1321  /*
1322   * Get or create a vsie page for a scb address.
1323   *
1324   * Returns: - address of a vsie page (cached or new one)
1325   *          - NULL if the same scb address is already used by another VCPU
1326   *          - ERR_PTR(-ENOMEM) if out of memory
1327   */
1328  static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1329  {
1330  	struct vsie_page *vsie_page;
1331  	struct page *page;
1332  	int nr_vcpus;
1333  
1334  	rcu_read_lock();
1335  	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1336  	rcu_read_unlock();
1337  	if (page) {
1338  		if (page_ref_inc_return(page) == 2)
1339  			return page_to_virt(page);
1340  		page_ref_dec(page);
1341  	}
1342  
1343  	/*
1344  	 * We want at least #online_vcpus shadows, so every VCPU can execute
1345  	 * the VSIE in parallel.
1346  	 */
1347  	nr_vcpus = atomic_read(&kvm->online_vcpus);
1348  
1349  	mutex_lock(&kvm->arch.vsie.mutex);
1350  	if (kvm->arch.vsie.page_count < nr_vcpus) {
1351  		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1352  		if (!page) {
1353  			mutex_unlock(&kvm->arch.vsie.mutex);
1354  			return ERR_PTR(-ENOMEM);
1355  		}
1356  		page_ref_inc(page);
1357  		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1358  		kvm->arch.vsie.page_count++;
1359  	} else {
1360  		/* reuse an existing entry that belongs to nobody */
1361  		while (true) {
1362  			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1363  			if (page_ref_inc_return(page) == 2)
1364  				break;
1365  			page_ref_dec(page);
1366  			kvm->arch.vsie.next++;
1367  			kvm->arch.vsie.next %= nr_vcpus;
1368  		}
1369  		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1370  	}
1371  	page->index = addr;
1372  	/* double use of the same address */
1373  	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1374  		page_ref_dec(page);
1375  		mutex_unlock(&kvm->arch.vsie.mutex);
1376  		return NULL;
1377  	}
1378  	mutex_unlock(&kvm->arch.vsie.mutex);
1379  
1380  	vsie_page = page_to_virt(page);
1381  	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1382  	release_gmap_shadow(vsie_page);
1383  	vsie_page->fault_addr = 0;
1384  	vsie_page->scb_s.ihcpu = 0xffffU;
1385  	return vsie_page;
1386  }
1387  
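/*
 * The radix tree in get_vsie_page() is keyed by addr >> 9: guest 2 scb
 * addresses are 512-byte aligned (enforced in kvm_s390_handle_vsie()),
 * so the low nine bits carry no information. E.g. an scb at 0x12345600
 * is filed under index 0x91a2b; page->index keeps the full address so
 * the entry can be deleted again when the page is recycled.
 */
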
1388  /* put a vsie page acquired via get_vsie_page */
1389  static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1390  {
1391  	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1392  
1393  	page_ref_dec(page);
1394  }
1395  
1396  int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1397  {
1398  	struct vsie_page *vsie_page;
1399  	unsigned long scb_addr;
1400  	int rc;
1401  
1402  	vcpu->stat.instruction_sie++;
1403  	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1404  		return -EOPNOTSUPP;
1405  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1406  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1407  
1408  	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1409  	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1410  
1411  	/* 512 byte alignment */
1412  	if (unlikely(scb_addr & 0x1ffUL))
1413  		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1414  
1415  	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1416  	    kvm_s390_vcpu_sie_inhibited(vcpu))
1417  		return 0;
1418  
1419  	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1420  	if (IS_ERR(vsie_page))
1421  		return PTR_ERR(vsie_page);
1422  	else if (!vsie_page)
1423  		/* double use of sie control block - simply do nothing */
1424  		return 0;
1425  
1426  	rc = pin_scb(vcpu, vsie_page, scb_addr);
1427  	if (rc)
1428  		goto out_put;
1429  	rc = shadow_scb(vcpu, vsie_page);
1430  	if (rc)
1431  		goto out_unpin_scb;
1432  	rc = pin_blocks(vcpu, vsie_page);
1433  	if (rc)
1434  		goto out_unshadow;
1435  	register_shadow_scb(vcpu, vsie_page);
1436  	rc = vsie_run(vcpu, vsie_page);
1437  	unregister_shadow_scb(vcpu);
1438  	unpin_blocks(vcpu, vsie_page);
1439  out_unshadow:
1440  	unshadow_scb(vcpu, vsie_page);
1441  out_unpin_scb:
1442  	unpin_scb(vcpu, vsie_page, scb_addr);
1443  out_put:
1444  	put_vsie_page(vcpu->kvm, vsie_page);
1445  
1446  	return rc < 0 ? rc : 0;
1447  }
1448  
1449  /* Init the vsie data structures. To be called when a vm is initialized. */
1450  void kvm_s390_vsie_init(struct kvm *kvm)
1451  {
1452  	mutex_init(&kvm->arch.vsie.mutex);
1453  	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
1454  }
1455  
1456  /* Destroy the vsie data structures. To be called when a vm is destroyed. */
1457  void kvm_s390_vsie_destroy(struct kvm *kvm)
1458  {
1459  	struct vsie_page *vsie_page;
1460  	struct page *page;
1461  	int i;
1462  
1463  	mutex_lock(&kvm->arch.vsie.mutex);
1464  	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1465  		page = kvm->arch.vsie.pages[i];
1466  		kvm->arch.vsie.pages[i] = NULL;
1467  		vsie_page = page_to_virt(page);
1468  		release_gmap_shadow(vsie_page);
1469  		/* free the radix tree entry */
1470  		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1471  		__free_page(page);
1472  	}
1473  	kvm->arch.vsie.page_count = 0;
1474  	mutex_unlock(&kvm->arch.vsie.mutex);
1475  }
1476  
1477  void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1478  {
1479  	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1480  
1481  	/*
1482  	 * Even if the VCPU lets go of the shadow sie block reference, it is
1483  	 * still valid in the cache. So we can safely kick it.
1484  	 */
1485  	if (scb) {
1486  		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1487  		if (scb->prog0c & PROG_IN_SIE)
1488  			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1489  	}
1490  }
1491