xref: /openbmc/linux/arch/s390/kvm/priv.c (revision f35e839a)
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

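/*
 * Background, for readers new to s390: the prefix register relocates
 * the 8k "low core" (two consecutive 4k pages, real addresses 0-8191)
 * of a CPU to a different absolute address.  That is why the handler
 * above masks the new value to an 8k boundary below 2 GB (0x7fffe000u)
 * and probes both backing pages before accepting it.  As a sketch
 * (real_to_abs is hypothetical, not a helper in this file),
 * real-to-absolute translation for a given prefix behaves like:
 *
 *	static inline u64 real_to_abs(u64 real, u64 prefix)
 *	{
 *		if (real < 8192)
 *			return real + prefix;
 *		if (real - prefix < 8192)
 *			return real - prefix;
 *		return real;
 *	}
 */
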
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

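/*
 * Note on handle_skey() above: rather than emulating ISKE, RRBE or
 * SSKE, it rewinds the PSW by the instruction length (all three are
 * 4-byte RRE instructions) so that the guest re-executes the
 * instruction once the host has resolved whatever caused the
 * intercept.  The plain subtraction assumes the address does not
 * wrap; a strictly mode-aware rewind would mask with the current
 * addressing mode, e.g. in 31-bit mode (sketch only, using
 * PSW_ADDR_31 as defined further down in this file):
 *
 *	gpsw.addr = (gpsw.addr - 4) & PSW_ADDR_31;
 */
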
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

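/*
 * The two lines at the end of handle_tpi() are the recurring idiom in
 * this file for setting the guest condition code: the CC is PSW bits
 * 18-19, i.e. bits 45-44 counted from the least significant end of
 * the 64-bit gpsw.mask.  A hypothetical helper (illustration only,
 * not part of this file) would read:
 *
 *	static inline void set_guest_cc(struct kvm_vcpu *vcpu, int cc)
 *	{
 *		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 *		vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
 *	}
 */
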
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

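/*
 * When in-kernel channel-subsystem support is off (the else branch
 * above), every channel I/O instruction gets condition code 3, which
 * architecturally means "not operational": the guest concludes that
 * no usable subchannels exist and gives up probing.
 */
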
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits that we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

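/*
 * For reference, the EA/BA combinations that is_valid_psw() checks:
 *
 *	EA BA  addressing mode  valid instruction addresses
 *	 0  0   24-bit          0 .. 0x00ffffff
 *	 0  1   31-bit          0 .. 0x7fffffff
 *	 1  0   (invalid combination)
 *	 1  1   64-bit          any
 */
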
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

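/*
 * LPSW loads an 8-byte ESA/390-style PSW, so the handler above widens
 * it: the short mask, minus its must-be-one bit 12 (PSW32_MASK_BASE),
 * which has to be zero in a z/Architecture PSW, becomes the upper
 * half of the 64-bit mask, and the topmost bit of the short address
 * word (the 31-bit AMODE bit) lands on the BA bit.  EA stays zero,
 * so the result is always a 24- or 31-bit mode PSW, never 64-bit.
 */
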
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

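/*
 * handle_stsi_3_2_2() merges hypervisor levels: it first asks the
 * machine for the real function-code-3 block (relevant when KVM
 * itself runs under another level-3 hypervisor such as z/VM), shifts
 * those descriptors down by one slot and installs this KVM instance
 * as vm[0] with the current number of vcpus.  Names are converted to
 * EBCDIC because that is what guests expect in SYSIB fields; a caf of
 * 1000 represents a capability adjustment factor of 100%, i.e. no
 * degradation.
 */
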
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_no_data;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}

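/*
 * STSI protocol recap: the function code in bits 32-35 of gr0 selects
 * the system-information block.  Function code 0 merely reports the
 * current configuration level in gr0 and sets condition code 0; the
 * 3 << 28 stored above is level 3, "running under a virtual machine".
 * Unsupported fc/selector combinations end up at out_no_data, which
 * reports condition code 3 ("requested information not available").
 */
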
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel.
	 * If the kernel can handle this instruction, we check for the
	 * problem state bit and (a) handle the instruction or (b) send
	 * a code 2 program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

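/*
 * For RRE-format instructions such as EPSW, the sie block's ipb field
 * carries instruction bytes 2-5 in its upper bits, so R1 and R2 are
 * the two nibbles of instruction byte 3 - hence the masks and shifts
 * below.
 */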
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/*
	 * We only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

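/*
 * TPROT condition codes, for reference: cc 0 means fetching and
 * storing are both permitted, cc 1 fetching only, cc 2 neither.  The
 * handler above approximates them from the host VMA permissions,
 * which is good enough for the Linux memory-detection pattern (access
 * key 0, DAT off) that it restricts itself to.
 */
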
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

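/*
 * SCKPF, handled above, sets the 16-bit TOD programmable field from
 * bits 48-63 of gr0 (any bit set in 32-47 is a specification
 * exception).  The value kept in todpr is what the guest later reads
 * back in the last two bytes of the STORE CLOCK EXTENDED result.
 */
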
static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
589