xref: /openbmc/linux/arch/s390/kvm/priv.c (revision 840ef8b7)
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

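/*
 * SET PREFIX: fetch a word from the second operand, mask it down to an
 * 8K-aligned prefix origin and make sure both pages of the new prefix
 * area are backed by accessible guest memory before switching over.
 */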
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
	return 0;
}

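/*
 * STORE PREFIX: write this vcpu's current prefix register to the
 * word-aligned second-operand address in guest storage.
 */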
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
	return 0;
}

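/*
 * STORE CPU ADDRESS: store the 16-bit CPU address (the vcpu id) at the
 * halfword-aligned second-operand address.
 */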
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
	return 0;
}

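/*
 * Storage key instructions (presumably ISKE, RRBE and SSKE, given the
 * 0xb229-0xb22b slots in the handler table below) are not emulated
 * here: the PSW is stepped back by the instruction length (4 bytes) so
 * the guest simply retries the operation.
 */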
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

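/*
 * TEST PENDING INTERRUPTION: try to dequeue an I/O interrupt matching
 * the CR6 mask. If one is found, its interruption code is stored either
 * at the supplied operand address or, for a zero operand, in the
 * lowcore, and the condition code tells the guest whether an interrupt
 * was pending.
 */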
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	u64 addr;
	struct kvm_s390_interrupt_info *inti;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (inti) {
		if (addr) {
			/*
			 * Store the two-word I/O interruption code into the
			 * provided area.
			 */
			put_guest_u16(vcpu, addr, inti->io.subchannel_id);
			put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
			put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
		} else {
			/*
			 * Store the three-word I/O interruption code into
			 * the appropriate lowcore area.
			 */
			put_guest_u16(vcpu, 184, inti->io.subchannel_id);
			put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
			put_guest_u32(vcpu, 188, inti->io.io_int_parm);
			put_guest_u32(vcpu, 192, inti->io.io_int_word);
		}
		cc = 1;
	} else
		cc = 0;
	kfree(inti);
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

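/*
 * TEST SUBCHANNEL: the instruction itself is emulated in userspace;
 * here we only dequeue a matching pending I/O interrupt, if any, hand
 * it to userspace in the kvm_run block and exit with -EREMOTE.
 */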
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

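/*
 * Common entry point for intercepted channel I/O instructions. With
 * in-kernel css support only TPI and the interrupt part of TSCH are
 * handled here and everything else is forwarded to userspace; without
 * css support the guest simply gets condition code 3.
 */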
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

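/*
 * STORE FACILITY LIST: copy the host facility bits we allow the guest
 * to see (masked with 0xff00fff3) into the guest's lowcore.
 */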
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits that we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
		trace_kvm_s390_handle_stfl(vcpu, facility_list);
	}
	return 0;
}

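/*
 * Called after a new PSW has been loaded by lpsw/lpswe: if the new mask
 * enables machine checks, deliver any that are pending.
 */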
static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

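/*
 * Bits of a newly loaded PSW that must not be set, and the highest
 * instruction address representable in 24-bit and 31-bit addressing
 * mode; used by the lpsw/lpswe validity checks below.
 */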
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

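/*
 * LOAD PSW: load an 8-byte short-format PSW from the doubleword-aligned
 * operand, expand it into the 16-byte format used by the SIE block and
 * reject PSW contents the checks below consider invalid.
 */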
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_compat_t new_psw;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	if (!(new_psw.mask & PSW32_MASK_BASE)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask =
		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}

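/*
 * LOAD PSW EXTENDED: like lpsw above, but the operand already is a full
 * 16-byte PSW, so it is taken over as-is and only validated.
 */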
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_t new_psw;

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	      PSW_MASK_BA) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}

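/*
 * STORE CPU ID: store the prepared CPU identification (stidp_data) at
 * the doubleword-aligned operand address.
 */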
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}

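/*
 * Build the SYSIB 3.2.2 response: query the host with stsi to pick up
 * any underlying level-3 hypervisors, shift their entries down by one
 * and insert KVM itself as the first virtual-machine description,
 * reporting the number of created vcpus.
 */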
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

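/*
 * STORE SYSTEM INFORMATION: function code 0 only reports the current
 * level (3), function codes 1 and 2 are answered with the host's own
 * stsi data, and 3.2.2 is synthesized by handle_stsi_3_2_2 above. The
 * result is copied to the 4K-aligned operand; unsupported requests get
 * condition code 3.
 */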
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

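/*
 * Handlers for the 0xb2xx instructions emulated in the kernel, indexed
 * by the low byte of the opcode; unlisted opcodes fall through to
 * userspace.
 */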
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

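/*
 * EXTRACT PSW: copy the PSW mask into the register pair named by the
 * RRE-format instruction. r1 and r2 are encoded in the last instruction
 * byte, which the SIE block exposes in bits 8-15 of ipb (masks
 * 0x00f00000 and 0x000f0000, shifted down by 20 and 16). EPSW is not
 * privileged, which is why kvm_s390_handle_b9 exempts it from the
 * problem-state check.
 */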
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

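/*
 * TEST PROTECTION: only the simple case used by Linux memory detection
 * (access key 0, guest DAT off) is handled here. The result is derived
 * from the VM_READ/VM_WRITE flags of the host mapping backing the
 * address and reported via the condition code; everything else goes to
 * userspace.
 */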
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	/* we must resolve the address without holding the mmap semaphore.
	 * This is ok since the userspace hypervisor is not supposed to change
	 * the mapping while the guest queries the memory. Otherwise the guest
	 * might crash or get wrong info anyway. */
	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_address);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

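/*
 * SET CLOCK PROGRAMMABLE FIELD: privileged; the low 16 bits of general
 * register 0 become the new TOD programmable register value (todpr),
 * and the 16 bits above them must be zero.
 */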
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
641