xref: /openbmc/linux/arch/s390/kvm/intercept.c (revision 1edd0337)
// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
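		/*
		 * insn_length() maps the two leftmost bits of the opcode to
		 * an instruction length of 2, 4 or 6 bytes.
		 */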
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}

static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	/*
	 * no need to check the return value of vcpu_stop as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
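	/* -EOPNOTSUPP lets the caller hand the stop intercept to userspace */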
	return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);

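	/* dispatch on the first byte of the instruction's opcode (IPA bits 0-7) */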
	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
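	/* clear the (already copied) ITDB so stale contents are not reported again */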
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 * @vcpu: virtual cpu
 *
 * This interception occurs if:
 * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
 *   occurred. In this case, the interrupt needs to be injected manually to
 *   preserve interrupt priority.
 * - the external new PSW has external interrupts enabled, which will cause an
 *   interruption loop. We drop to userspace in this case.
 *
 * The latter case can be detected by inspecting the external mask bit in the
 * external new psw.
 *
 * Under PV, only the latter case can occur, since interrupt priorities are
 * handled in the ultravisor.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

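	/*
	 * protected guests: the lowcore cannot be read, so use the guest PSW,
	 * which at this point already reflects the external new PSW
	 */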
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		newpsw = vcpu->arch.sie_block->gpsw;
	} else {
		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
		if (rc)
			return rc;
	}

	/*
	 * Clock comparator or timer interrupt with external interrupt enabled
	 * will cause interrupt loop. Drop to userspace.
	 */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
 * @vcpu: virtual cpu
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Ensure that the source is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
					      reg2, &srcaddr, GACC_FETCH, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Ensure that the destination is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
					      reg1, &dstaddr, GACC_STORE, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

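	/* both operands can be resolved now, so let the guest retry MVPG */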
	kvm_s390_retry_instr(vcpu);

	return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_pei++;

	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

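	/* only function code 0 is currently supported */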
	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}

static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

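	/* 0xb256: STHYI - store hypervisor information */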
	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
	u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);

	kvm_s390_set_prefix(vcpu, pref);
	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
	return 0;
}

static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
	struct uv_cb_cts uvcb = {
		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len	= sizeof(uvcb),
		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr		= guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}
	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}

static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
	int ret;

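	/* 0xb210 SET PREFIX, 0xb220 SERVICE CALL, 0xb9a4 Ultravisor Call */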
	if (vcpu->arch.sie_block->ipa == 0xb210)
		return handle_pv_spx(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb220)
		return handle_pv_sclp(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb9a4)
		return handle_pv_uvc(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
		/*
		 * Besides external call, other SIGP orders also cause a
		 * 108 (pv notify) intercept. In contrast to external call,
		 * these orders need to be emulated and hence the appropriate
		 * place to handle them is in handle_instruction().
		 * So first try kvm_s390_handle_sigp_pei() and if that isn't
		 * successful, go on with handle_instruction().
		 */
		ret = kvm_s390_handle_sigp_pei(vcpu);
		if (!ret)
			return ret;
	}

	return handle_instruction(vcpu);
}

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
	case ICPT_PV_PREF:
		rc = 0;
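		/* import both pages backing the guest prefix area again */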
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, also if the instruction is processed in user space */
	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
	    (!rc || rc == -EOPNOTSUPP))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}
639