xref: /openbmc/linux/arch/s390/kvm/gaccess.c (revision 88607ed9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * guest access functions
4  *
5  * Copyright IBM Corp. 2014
6  *
7  */
8 
9 #include <linux/vmalloc.h>
10 #include <linux/mm_types.h>
11 #include <linux/err.h>
12 #include <linux/pgtable.h>
13 #include <linux/bitfield.h>
14 
15 #include <asm/gmap.h>
16 #include "kvm-s390.h"
17 #include "gaccess.h"
18 #include <asm/switch_to.h>
19 
20 union asce {
21 	unsigned long val;
22 	struct {
23 		unsigned long origin : 52; /* Region- or Segment-Table Origin */
24 		unsigned long	 : 2;
25 		unsigned long g  : 1; /* Subspace Group Control */
26 		unsigned long p  : 1; /* Private Space Control */
27 		unsigned long s  : 1; /* Storage-Alteration-Event Control */
28 		unsigned long x  : 1; /* Space-Switch-Event Control */
29 		unsigned long r  : 1; /* Real-Space Control */
30 		unsigned long	 : 1;
31 		unsigned long dt : 2; /* Designation-Type Control */
32 		unsigned long tl : 2; /* Region- or Segment-Table Length */
33 	};
34 };
35 
36 enum {
37 	ASCE_TYPE_SEGMENT = 0,
38 	ASCE_TYPE_REGION3 = 1,
39 	ASCE_TYPE_REGION2 = 2,
40 	ASCE_TYPE_REGION1 = 3
41 };
42 
43 union region1_table_entry {
44 	unsigned long val;
45 	struct {
46 		unsigned long rto: 52;/* Region-Table Origin */
47 		unsigned long	 : 2;
48 		unsigned long p  : 1; /* DAT-Protection Bit */
49 		unsigned long	 : 1;
50 		unsigned long tf : 2; /* Region-Second-Table Offset */
51 		unsigned long i  : 1; /* Region-Invalid Bit */
52 		unsigned long	 : 1;
53 		unsigned long tt : 2; /* Table-Type Bits */
54 		unsigned long tl : 2; /* Region-Second-Table Length */
55 	};
56 };
57 
58 union region2_table_entry {
59 	unsigned long val;
60 	struct {
61 		unsigned long rto: 52;/* Region-Table Origin */
62 		unsigned long	 : 2;
63 		unsigned long p  : 1; /* DAT-Protection Bit */
64 		unsigned long	 : 1;
65 		unsigned long tf : 2; /* Region-Third-Table Offset */
66 		unsigned long i  : 1; /* Region-Invalid Bit */
67 		unsigned long	 : 1;
68 		unsigned long tt : 2; /* Table-Type Bits */
69 		unsigned long tl : 2; /* Region-Third-Table Length */
70 	};
71 };
72 
73 struct region3_table_entry_fc0 {
74 	unsigned long sto: 52;/* Segment-Table Origin */
75 	unsigned long	 : 1;
76 	unsigned long fc : 1; /* Format-Control */
77 	unsigned long p  : 1; /* DAT-Protection Bit */
78 	unsigned long	 : 1;
79 	unsigned long tf : 2; /* Segment-Table Offset */
80 	unsigned long i  : 1; /* Region-Invalid Bit */
81 	unsigned long cr : 1; /* Common-Region Bit */
82 	unsigned long tt : 2; /* Table-Type Bits */
83 	unsigned long tl : 2; /* Segment-Table Length */
84 };
85 
86 struct region3_table_entry_fc1 {
87 	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
88 	unsigned long	 : 14;
89 	unsigned long av : 1; /* ACCF-Validity Control */
90 	unsigned long acc: 4; /* Access-Control Bits */
91 	unsigned long f  : 1; /* Fetch-Protection Bit */
92 	unsigned long fc : 1; /* Format-Control */
93 	unsigned long p  : 1; /* DAT-Protection Bit */
94 	unsigned long iep: 1; /* Instruction-Execution-Protection */
95 	unsigned long	 : 2;
96 	unsigned long i  : 1; /* Region-Invalid Bit */
97 	unsigned long cr : 1; /* Common-Region Bit */
98 	unsigned long tt : 2; /* Table-Type Bits */
99 	unsigned long	 : 2;
100 };
101 
102 union region3_table_entry {
103 	unsigned long val;
104 	struct region3_table_entry_fc0 fc0;
105 	struct region3_table_entry_fc1 fc1;
106 	struct {
107 		unsigned long	 : 53;
108 		unsigned long fc : 1; /* Format-Control */
109 		unsigned long	 : 4;
110 		unsigned long i  : 1; /* Region-Invalid Bit */
111 		unsigned long cr : 1; /* Common-Region Bit */
112 		unsigned long tt : 2; /* Table-Type Bits */
113 		unsigned long	 : 2;
114 	};
115 };
116 
117 struct segment_entry_fc0 {
118 	unsigned long pto: 53;/* Page-Table Origin */
119 	unsigned long fc : 1; /* Format-Control */
120 	unsigned long p  : 1; /* DAT-Protection Bit */
121 	unsigned long	 : 3;
122 	unsigned long i  : 1; /* Segment-Invalid Bit */
123 	unsigned long cs : 1; /* Common-Segment Bit */
124 	unsigned long tt : 2; /* Table-Type Bits */
125 	unsigned long	 : 2;
126 };
127 
128 struct segment_entry_fc1 {
129 	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
130 	unsigned long	 : 3;
131 	unsigned long av : 1; /* ACCF-Validity Control */
132 	unsigned long acc: 4; /* Access-Control Bits */
133 	unsigned long f  : 1; /* Fetch-Protection Bit */
134 	unsigned long fc : 1; /* Format-Control */
135 	unsigned long p  : 1; /* DAT-Protection Bit */
136 	unsigned long iep: 1; /* Instruction-Execution-Protection */
137 	unsigned long	 : 2;
138 	unsigned long i  : 1; /* Segment-Invalid Bit */
139 	unsigned long cs : 1; /* Common-Segment Bit */
140 	unsigned long tt : 2; /* Table-Type Bits */
141 	unsigned long	 : 2;
142 };
143 
144 union segment_table_entry {
145 	unsigned long val;
146 	struct segment_entry_fc0 fc0;
147 	struct segment_entry_fc1 fc1;
148 	struct {
149 		unsigned long	 : 53;
150 		unsigned long fc : 1; /* Format-Control */
151 		unsigned long	 : 4;
152 		unsigned long i  : 1; /* Segment-Invalid Bit */
153 		unsigned long cs : 1; /* Common-Segment Bit */
154 		unsigned long tt : 2; /* Table-Type Bits */
155 		unsigned long	 : 2;
156 	};
157 };
158 
159 enum {
160 	TABLE_TYPE_SEGMENT = 0,
161 	TABLE_TYPE_REGION3 = 1,
162 	TABLE_TYPE_REGION2 = 2,
163 	TABLE_TYPE_REGION1 = 3
164 };
165 
166 union page_table_entry {
167 	unsigned long val;
168 	struct {
169 		unsigned long pfra : 52; /* Page-Frame Real Address */
170 		unsigned long z  : 1; /* Zero Bit */
171 		unsigned long i  : 1; /* Page-Invalid Bit */
172 		unsigned long p  : 1; /* DAT-Protection Bit */
173 		unsigned long iep: 1; /* Instruction-Execution-Protection */
174 		unsigned long	 : 8;
175 	};
176 };
177 
178 /*
179  * vaddress union in order to easily decode a virtual address into its
180  * region first index, region second index etc. parts.
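 *
 * With 4 KB pages, rfx covers address bits 0-10, rsx bits 11-21, rtx
 * bits 22-32, sx bits 33-43, px bits 44-51 and bx bits 52-63 (bit 0
 * being the leftmost bit of the 64-bit address).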
181  */
182 union vaddress {
183 	unsigned long addr;
184 	struct {
185 		unsigned long rfx : 11;
186 		unsigned long rsx : 11;
187 		unsigned long rtx : 11;
188 		unsigned long sx  : 11;
189 		unsigned long px  : 8;
190 		unsigned long bx  : 12;
191 	};
192 	struct {
193 		unsigned long rfx01 : 2;
194 		unsigned long	    : 9;
195 		unsigned long rsx01 : 2;
196 		unsigned long	    : 9;
197 		unsigned long rtx01 : 2;
198 		unsigned long	    : 9;
199 		unsigned long sx01  : 2;
200 		unsigned long	    : 29;
201 	};
202 };
203 
204 /*
205  * raddress union which will contain the result (real or absolute address)
206  * after a page table walk. The rfaa, sfaa and pfra members are used to
207  * simply assign them the value of a region, segment or page table entry.
208  */
209 union raddress {
210 	unsigned long addr;
211 	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
212 	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
213 	unsigned long pfra : 52; /* Page-Frame Real Address */
214 };
215 
216 union alet {
217 	u32 val;
218 	struct {
219 		u32 reserved : 7;
220 		u32 p        : 1;
221 		u32 alesn    : 8;
222 		u32 alen     : 16;
223 	};
224 };
225 
226 union ald {
227 	u32 val;
228 	struct {
229 		u32     : 1;
230 		u32 alo : 24;
231 		u32 all : 7;
232 	};
233 };
234 
235 struct ale {
236 	unsigned long i      : 1; /* ALEN-Invalid Bit */
237 	unsigned long        : 5;
238 	unsigned long fo     : 1; /* Fetch-Only Bit */
239 	unsigned long p      : 1; /* Private Bit */
240 	unsigned long alesn  : 8; /* Access-List-Entry Sequence Number */
241 	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
242 	unsigned long        : 32;
243 	unsigned long        : 1;
244 	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
245 	unsigned long        : 6;
246 	unsigned long astesn : 32; /* ASTE Sequence Number */
247 };
248 
249 struct aste {
250 	unsigned long i      : 1; /* ASX-Invalid Bit */
251 	unsigned long ato    : 29; /* Authority-Table Origin */
252 	unsigned long        : 1;
253 	unsigned long b      : 1; /* Base-Space Bit */
254 	unsigned long ax     : 16; /* Authorization Index */
255 	unsigned long atl    : 12; /* Authority-Table Length */
256 	unsigned long        : 2;
257 	unsigned long ca     : 1; /* Controlled-ASN Bit */
258 	unsigned long ra     : 1; /* Reusable-ASN Bit */
259 	unsigned long asce   : 64; /* Address-Space-Control Element */
260 	unsigned long ald    : 32;
261 	unsigned long astesn : 32;
262 	/* .. more fields there */
263 };
264 
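/*
 * The IPTE lock serializes KVM's guest storage accesses against the guest's
 * use of IPTE. Two flavours exist: with the siif facility the lock state is
 * kept in the ipte control word of the SCA (kh counter and k bit), otherwise
 * a counter in struct kvm protected by ipte_mutex is used.
 */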
265 int ipte_lock_held(struct kvm *kvm)
266 {
267 	if (sclp.has_siif) {
268 		int rc;
269 
270 		read_lock(&kvm->arch.sca_lock);
271 		rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
272 		read_unlock(&kvm->arch.sca_lock);
273 		return rc;
274 	}
275 	return kvm->arch.ipte_lock_count != 0;
276 }
277 
278 static void ipte_lock_simple(struct kvm *kvm)
279 {
280 	union ipte_control old, new, *ic;
281 
282 	mutex_lock(&kvm->arch.ipte_mutex);
283 	kvm->arch.ipte_lock_count++;
284 	if (kvm->arch.ipte_lock_count > 1)
285 		goto out;
286 retry:
287 	read_lock(&kvm->arch.sca_lock);
288 	ic = kvm_s390_get_ipte_control(kvm);
289 	do {
290 		old = READ_ONCE(*ic);
291 		if (old.k) {
292 			read_unlock(&kvm->arch.sca_lock);
293 			cond_resched();
294 			goto retry;
295 		}
296 		new = old;
297 		new.k = 1;
298 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
299 	read_unlock(&kvm->arch.sca_lock);
300 out:
301 	mutex_unlock(&kvm->arch.ipte_mutex);
302 }
303 
304 static void ipte_unlock_simple(struct kvm *kvm)
305 {
306 	union ipte_control old, new, *ic;
307 
308 	mutex_lock(&kvm->arch.ipte_mutex);
309 	kvm->arch.ipte_lock_count--;
310 	if (kvm->arch.ipte_lock_count)
311 		goto out;
312 	read_lock(&kvm->arch.sca_lock);
313 	ic = kvm_s390_get_ipte_control(kvm);
314 	do {
315 		old = READ_ONCE(*ic);
316 		new = old;
317 		new.k = 0;
318 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
319 	read_unlock(&kvm->arch.sca_lock);
320 	wake_up(&kvm->arch.ipte_wq);
321 out:
322 	mutex_unlock(&kvm->arch.ipte_mutex);
323 }
324 
325 static void ipte_lock_siif(struct kvm *kvm)
326 {
327 	union ipte_control old, new, *ic;
328 
329 retry:
330 	read_lock(&kvm->arch.sca_lock);
331 	ic = kvm_s390_get_ipte_control(kvm);
332 	do {
333 		old = READ_ONCE(*ic);
334 		if (old.kg) {
335 			read_unlock(&kvm->arch.sca_lock);
336 			cond_resched();
337 			goto retry;
338 		}
339 		new = old;
340 		new.k = 1;
341 		new.kh++;
342 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
343 	read_unlock(&kvm->arch.sca_lock);
344 }
345 
346 static void ipte_unlock_siif(struct kvm *kvm)
347 {
348 	union ipte_control old, new, *ic;
349 
350 	read_lock(&kvm->arch.sca_lock);
351 	ic = kvm_s390_get_ipte_control(kvm);
352 	do {
353 		old = READ_ONCE(*ic);
354 		new = old;
355 		new.kh--;
356 		if (!new.kh)
357 			new.k = 0;
358 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
359 	read_unlock(&kvm->arch.sca_lock);
360 	if (!new.kh)
361 		wake_up(&kvm->arch.ipte_wq);
362 }
363 
364 void ipte_lock(struct kvm *kvm)
365 {
366 	if (sclp.has_siif)
367 		ipte_lock_siif(kvm);
368 	else
369 		ipte_lock_simple(kvm);
370 }
371 
372 void ipte_unlock(struct kvm *kvm)
373 {
374 	if (sclp.has_siif)
375 		ipte_unlock_siif(kvm);
376 	else
377 		ipte_unlock_simple(kvm);
378 }
379 
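/*
 * Resolve the access-list-entry token in access register @ar to the
 * address-space-control element to be used for the access, walking the
 * access list and the ASN-second-table entry as defined by
 * access-register translation. Returns 0 on success, a program
 * interruption code if translation fails, or the error from reading
 * guest memory.
 */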
380 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
381 			  enum gacc_mode mode)
382 {
383 	union alet alet;
384 	struct ale ale;
385 	struct aste aste;
386 	unsigned long ald_addr, authority_table_addr;
387 	union ald ald;
388 	int eax, rc;
389 	u8 authority_table;
390 
391 	if (ar >= NUM_ACRS)
392 		return -EINVAL;
393 
394 	save_access_regs(vcpu->run->s.regs.acrs);
395 	alet.val = vcpu->run->s.regs.acrs[ar];
396 
397 	if (ar == 0 || alet.val == 0) {
398 		asce->val = vcpu->arch.sie_block->gcr[1];
399 		return 0;
400 	} else if (alet.val == 1) {
401 		asce->val = vcpu->arch.sie_block->gcr[7];
402 		return 0;
403 	}
404 
405 	if (alet.reserved)
406 		return PGM_ALET_SPECIFICATION;
407 
408 	if (alet.p)
409 		ald_addr = vcpu->arch.sie_block->gcr[5];
410 	else
411 		ald_addr = vcpu->arch.sie_block->gcr[2];
412 	ald_addr &= 0x7fffffc0;
413 
414 	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
415 	if (rc)
416 		return rc;
417 
418 	if (alet.alen / 8 > ald.all)
419 		return PGM_ALEN_TRANSLATION;
420 
421 	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
422 		return PGM_ADDRESSING;
423 
424 	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
425 			     sizeof(struct ale));
426 	if (rc)
427 		return rc;
428 
429 	if (ale.i == 1)
430 		return PGM_ALEN_TRANSLATION;
431 	if (ale.alesn != alet.alesn)
432 		return PGM_ALE_SEQUENCE;
433 
434 	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
435 	if (rc)
436 		return rc;
437 
438 	if (aste.i)
439 		return PGM_ASTE_VALIDITY;
440 	if (aste.astesn != ale.astesn)
441 		return PGM_ASTE_SEQUENCE;
442 
443 	if (ale.p == 1) {
444 		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
445 		if (ale.aleax != eax) {
446 			if (eax / 16 > aste.atl)
447 				return PGM_EXTENDED_AUTHORITY;
448 
449 			authority_table_addr = aste.ato * 4 + eax / 4;
450 
451 			rc = read_guest_real(vcpu, authority_table_addr,
452 					     &authority_table,
453 					     sizeof(u8));
454 			if (rc)
455 				return rc;
456 
457 			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
458 				return PGM_EXTENDED_AUTHORITY;
459 		}
460 	}
461 
462 	if (ale.fo == 1 && mode == GACC_STORE)
463 		return PGM_PROTECTION;
464 
465 	asce->val = aste.asce;
466 	return 0;
467 }
468 
469 struct trans_exc_code_bits {
470 	unsigned long addr : 52; /* Translation-exception Address */
471 	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
472 	unsigned long	   : 2;
473 	unsigned long b56  : 1;
474 	unsigned long	   : 3;
475 	unsigned long b60  : 1;
476 	unsigned long b61  : 1;
477 	unsigned long as   : 2;  /* ASCE Identifier */
478 };
479 
480 enum {
481 	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
482 	FSI_STORE   = 1, /* Exception was due to store operation */
483 	FSI_FETCH   = 2  /* Exception was due to fetch operation */
484 };
485 
486 enum prot_type {
487 	PROT_TYPE_LA   = 0,
488 	PROT_TYPE_KEYC = 1,
489 	PROT_TYPE_ALC  = 2,
490 	PROT_TYPE_DAT  = 3,
491 	PROT_TYPE_IEP  = 4,
492 	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
493 	PROT_NONE,
494 };
495 
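/*
 * Fill vcpu->arch.pgm with the translation-exception data for the given
 * program interruption code, so that a subsequent call to
 * kvm_s390_inject_prog_vcpu() injects the correct exception into the guest,
 * and return the code to the caller.
 */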
496 static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
497 			    enum gacc_mode mode, enum prot_type prot, bool terminate)
498 {
499 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
500 	struct trans_exc_code_bits *tec;
501 
502 	memset(pgm, 0, sizeof(*pgm));
503 	pgm->code = code;
504 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
505 
506 	switch (code) {
507 	case PGM_PROTECTION:
508 		switch (prot) {
509 		case PROT_NONE:
510 			/* We should never get here, acts like termination */
511 			WARN_ON_ONCE(1);
512 			break;
513 		case PROT_TYPE_IEP:
514 			tec->b61 = 1;
515 			fallthrough;
516 		case PROT_TYPE_LA:
517 			tec->b56 = 1;
518 			break;
519 		case PROT_TYPE_KEYC:
520 			tec->b60 = 1;
521 			break;
522 		case PROT_TYPE_ALC:
523 			tec->b60 = 1;
524 			fallthrough;
525 		case PROT_TYPE_DAT:
526 			tec->b61 = 1;
527 			break;
528 		}
529 		if (terminate) {
530 			tec->b56 = 0;
531 			tec->b60 = 0;
532 			tec->b61 = 0;
533 		}
534 		fallthrough;
535 	case PGM_ASCE_TYPE:
536 	case PGM_PAGE_TRANSLATION:
537 	case PGM_REGION_FIRST_TRANS:
538 	case PGM_REGION_SECOND_TRANS:
539 	case PGM_REGION_THIRD_TRANS:
540 	case PGM_SEGMENT_TRANSLATION:
541 		/*
542 		 * op_access_id only applies to MOVE_PAGE -> set bit 61
543 		 * exc_access_id has to be set to 0 for some instructions. Both
544 		 * cases have to be handled by the caller.
545 		 */
546 		tec->addr = gva >> PAGE_SHIFT;
547 		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
548 		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
549 		fallthrough;
550 	case PGM_ALEN_TRANSLATION:
551 	case PGM_ALE_SEQUENCE:
552 	case PGM_ASTE_VALIDITY:
553 	case PGM_ASTE_SEQUENCE:
554 	case PGM_EXTENDED_AUTHORITY:
555 		/*
556 		 * We can always store exc_access_id, as it is
557 		 * undefined for non-ar cases. It is undefined for
558 		 * most DAT protection exceptions.
559 		 */
560 		pgm->exc_access_id = ar;
561 		break;
562 	}
563 	return code;
564 }
565 
566 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
567 		     enum gacc_mode mode, enum prot_type prot)
568 {
569 	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
570 }
571 
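/*
 * Determine the ASCE that governs the access: CR1 for primary space, CR7 for
 * secondary space, CR13 for home space, or the result of access-register
 * translation when the PSW is in access-register mode. With DAT disabled a
 * real-space ASCE is returned instead.
 */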
572 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
573 			 unsigned long ga, u8 ar, enum gacc_mode mode)
574 {
575 	int rc;
576 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
577 
578 	if (!psw.dat) {
579 		asce->val = 0;
580 		asce->r = 1;
581 		return 0;
582 	}
583 
584 	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
585 		psw.as = PSW_BITS_AS_PRIMARY;
586 
587 	switch (psw.as) {
588 	case PSW_BITS_AS_PRIMARY:
589 		asce->val = vcpu->arch.sie_block->gcr[1];
590 		return 0;
591 	case PSW_BITS_AS_SECONDARY:
592 		asce->val = vcpu->arch.sie_block->gcr[7];
593 		return 0;
594 	case PSW_BITS_AS_HOME:
595 		asce->val = vcpu->arch.sie_block->gcr[13];
596 		return 0;
597 	case PSW_BITS_AS_ACCREG:
598 		rc = ar_translation(vcpu, asce, ar, mode);
599 		if (rc > 0)
600 			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
601 		return rc;
602 	}
603 	return 0;
604 }
605 
606 static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
607 {
608 	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
609 }
610 
611 /**
612  * guest_translate - translate a guest virtual into a guest absolute address
613  * @vcpu: virtual cpu
614  * @gva: guest virtual address
615  * @gpa: points to where guest physical (absolute) address should be stored
616  * @asce: effective asce
617  * @mode: indicates the access mode to be used
618  * @prot: returns the type for protection exceptions
619  *
620  * Translate a guest virtual address into a guest absolute address by means
621  * of dynamic address translation as specified by the architecture.
622  * If the resulting absolute address is not available in the configuration
623  * an addressing exception is indicated and @gpa will not be changed.
624  *
625  * Returns: - zero on success; @gpa contains the resulting absolute address
626  *	    - a negative value if guest access failed due to e.g. broken
627  *	      guest mapping
628  *	    - a positive value if an access exception happened. In this case
629  *	      the returned value is the program interruption code as defined
630  *	      by the architecture
631  */
632 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
633 				     unsigned long *gpa, const union asce asce,
634 				     enum gacc_mode mode, enum prot_type *prot)
635 {
636 	union vaddress vaddr = {.addr = gva};
637 	union raddress raddr = {.addr = gva};
638 	union page_table_entry pte;
639 	int dat_protection = 0;
640 	int iep_protection = 0;
641 	union ctlreg0 ctlreg0;
642 	unsigned long ptr;
643 	int edat1, edat2, iep;
644 
645 	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
646 	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
647 	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
648 	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
649 	if (asce.r)
650 		goto real_address;
651 	ptr = asce.origin * PAGE_SIZE;
652 	switch (asce.dt) {
653 	case ASCE_TYPE_REGION1:
654 		if (vaddr.rfx01 > asce.tl)
655 			return PGM_REGION_FIRST_TRANS;
656 		ptr += vaddr.rfx * 8;
657 		break;
658 	case ASCE_TYPE_REGION2:
659 		if (vaddr.rfx)
660 			return PGM_ASCE_TYPE;
661 		if (vaddr.rsx01 > asce.tl)
662 			return PGM_REGION_SECOND_TRANS;
663 		ptr += vaddr.rsx * 8;
664 		break;
665 	case ASCE_TYPE_REGION3:
666 		if (vaddr.rfx || vaddr.rsx)
667 			return PGM_ASCE_TYPE;
668 		if (vaddr.rtx01 > asce.tl)
669 			return PGM_REGION_THIRD_TRANS;
670 		ptr += vaddr.rtx * 8;
671 		break;
672 	case ASCE_TYPE_SEGMENT:
673 		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
674 			return PGM_ASCE_TYPE;
675 		if (vaddr.sx01 > asce.tl)
676 			return PGM_SEGMENT_TRANSLATION;
677 		ptr += vaddr.sx * 8;
678 		break;
679 	}
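	/*
	 * The switch above validated the top-level table index and computed
	 * the address of the first table entry; the cases below fall through
	 * from the highest table level present in the ASCE down to the
	 * segment table, each validating its entry and deriving the origin
	 * of the next lower table.
	 */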
680 	switch (asce.dt) {
681 	case ASCE_TYPE_REGION1:	{
682 		union region1_table_entry rfte;
683 
684 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
685 			return PGM_ADDRESSING;
686 		if (deref_table(vcpu->kvm, ptr, &rfte.val))
687 			return -EFAULT;
688 		if (rfte.i)
689 			return PGM_REGION_FIRST_TRANS;
690 		if (rfte.tt != TABLE_TYPE_REGION1)
691 			return PGM_TRANSLATION_SPEC;
692 		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
693 			return PGM_REGION_SECOND_TRANS;
694 		if (edat1)
695 			dat_protection |= rfte.p;
696 		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
697 	}
698 		fallthrough;
699 	case ASCE_TYPE_REGION2: {
700 		union region2_table_entry rste;
701 
702 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
703 			return PGM_ADDRESSING;
704 		if (deref_table(vcpu->kvm, ptr, &rste.val))
705 			return -EFAULT;
706 		if (rste.i)
707 			return PGM_REGION_SECOND_TRANS;
708 		if (rste.tt != TABLE_TYPE_REGION2)
709 			return PGM_TRANSLATION_SPEC;
710 		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
711 			return PGM_REGION_THIRD_TRANS;
712 		if (edat1)
713 			dat_protection |= rste.p;
714 		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
715 	}
716 		fallthrough;
717 	case ASCE_TYPE_REGION3: {
718 		union region3_table_entry rtte;
719 
720 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
721 			return PGM_ADDRESSING;
722 		if (deref_table(vcpu->kvm, ptr, &rtte.val))
723 			return -EFAULT;
724 		if (rtte.i)
725 			return PGM_REGION_THIRD_TRANS;
726 		if (rtte.tt != TABLE_TYPE_REGION3)
727 			return PGM_TRANSLATION_SPEC;
728 		if (rtte.cr && asce.p && edat2)
729 			return PGM_TRANSLATION_SPEC;
730 		if (rtte.fc && edat2) {
731 			dat_protection |= rtte.fc1.p;
732 			iep_protection = rtte.fc1.iep;
733 			raddr.rfaa = rtte.fc1.rfaa;
734 			goto absolute_address;
735 		}
736 		if (vaddr.sx01 < rtte.fc0.tf)
737 			return PGM_SEGMENT_TRANSLATION;
738 		if (vaddr.sx01 > rtte.fc0.tl)
739 			return PGM_SEGMENT_TRANSLATION;
740 		if (edat1)
741 			dat_protection |= rtte.fc0.p;
742 		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
743 	}
744 		fallthrough;
745 	case ASCE_TYPE_SEGMENT: {
746 		union segment_table_entry ste;
747 
748 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
749 			return PGM_ADDRESSING;
750 		if (deref_table(vcpu->kvm, ptr, &ste.val))
751 			return -EFAULT;
752 		if (ste.i)
753 			return PGM_SEGMENT_TRANSLATION;
754 		if (ste.tt != TABLE_TYPE_SEGMENT)
755 			return PGM_TRANSLATION_SPEC;
756 		if (ste.cs && asce.p)
757 			return PGM_TRANSLATION_SPEC;
758 		if (ste.fc && edat1) {
759 			dat_protection |= ste.fc1.p;
760 			iep_protection = ste.fc1.iep;
761 			raddr.sfaa = ste.fc1.sfaa;
762 			goto absolute_address;
763 		}
764 		dat_protection |= ste.fc0.p;
765 		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
766 	}
767 	}
768 	if (kvm_is_error_gpa(vcpu->kvm, ptr))
769 		return PGM_ADDRESSING;
770 	if (deref_table(vcpu->kvm, ptr, &pte.val))
771 		return -EFAULT;
772 	if (pte.i)
773 		return PGM_PAGE_TRANSLATION;
774 	if (pte.z)
775 		return PGM_TRANSLATION_SPEC;
776 	dat_protection |= pte.p;
777 	iep_protection = pte.iep;
778 	raddr.pfra = pte.pfra;
779 real_address:
780 	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
781 absolute_address:
782 	if (mode == GACC_STORE && dat_protection) {
783 		*prot = PROT_TYPE_DAT;
784 		return PGM_PROTECTION;
785 	}
786 	if (mode == GACC_IFETCH && iep_protection && iep) {
787 		*prot = PROT_TYPE_IEP;
788 		return PGM_PROTECTION;
789 	}
790 	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
791 		return PGM_ADDRESSING;
792 	*gpa = raddr.addr;
793 	return 0;
794 }
795 
796 static inline int is_low_address(unsigned long ga)
797 {
798 	/* Check for address ranges 0..511 and 4096..4607 */
799 	return (ga & ~0x11fful) == 0;
800 }
801 
802 static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
803 					  const union asce asce)
804 {
805 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
806 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
807 
808 	if (!ctlreg0.lap)
809 		return 0;
810 	if (psw_bits(*psw).dat && asce.p)
811 		return 0;
812 	return 1;
813 }
814 
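/*
 * Check whether an access with @access_key to the page at @gpa is permitted
 * by storage-key protection: access key 0 and a matching access-control
 * field always allow the access, and fetches are allowed when the page has
 * fetch protection disabled.
 */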
815 static int vm_check_access_key(struct kvm *kvm, u8 access_key,
816 			       enum gacc_mode mode, gpa_t gpa)
817 {
818 	u8 storage_key, access_control;
819 	bool fetch_protected;
820 	unsigned long hva;
821 	int r;
822 
823 	if (access_key == 0)
824 		return 0;
825 
826 	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
827 	if (kvm_is_error_hva(hva))
828 		return PGM_ADDRESSING;
829 
830 	mmap_read_lock(current->mm);
831 	r = get_guest_storage_key(current->mm, hva, &storage_key);
832 	mmap_read_unlock(current->mm);
833 	if (r)
834 		return r;
835 	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
836 	if (access_control == access_key)
837 		return 0;
838 	fetch_protected = storage_key & _PAGE_FP_BIT;
839 	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
840 		return 0;
841 	return PGM_PROTECTION;
842 }
843 
844 static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
845 					   union asce asce)
846 {
847 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
848 	unsigned long override;
849 
850 	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
851 		/* check if fetch protection override enabled */
852 		override = vcpu->arch.sie_block->gcr[0];
853 		override &= CR0_FETCH_PROTECTION_OVERRIDE;
854 		/* not applicable if subject to DAT && private space */
855 		override = override && !(psw_bits(*psw).dat && asce.p);
856 		return override;
857 	}
858 	return false;
859 }
860 
861 static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
862 {
863 	return ga < 2048 && ga + len <= 2048;
864 }
865 
866 static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
867 {
868 	/* check if storage protection override enabled */
869 	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
870 }
871 
872 static bool storage_prot_override_applies(u8 access_control)
873 {
874 	/* matches special storage protection override key (9) -> allow */
875 	return access_control == PAGE_SPO_ACC;
876 }
877 
878 static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
879 				 enum gacc_mode mode, union asce asce, gpa_t gpa,
880 				 unsigned long ga, unsigned int len)
881 {
882 	u8 storage_key, access_control;
883 	unsigned long hva;
884 	int r;
885 
886 	/* access key 0 matches any storage key -> allow */
887 	if (access_key == 0)
888 		return 0;
889 	/*
890 	 * caller needs to ensure that gfn is accessible, so we can
891 	 * assume that this cannot fail
892 	 */
893 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
894 	mmap_read_lock(current->mm);
895 	r = get_guest_storage_key(current->mm, hva, &storage_key);
896 	mmap_read_unlock(current->mm);
897 	if (r)
898 		return r;
899 	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
900 	/* access key matches storage key -> allow */
901 	if (access_control == access_key)
902 		return 0;
903 	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
904 		/* it is a fetch and fetch protection is off -> allow */
905 		if (!(storage_key & _PAGE_FP_BIT))
906 			return 0;
907 		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
908 		    fetch_prot_override_applies(ga, len))
909 			return 0;
910 	}
911 	if (storage_prot_override_applicable(vcpu) &&
912 	    storage_prot_override_applies(access_control))
913 		return 0;
914 	return PGM_PROTECTION;
915 }
916 
917 /**
918  * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
919  * covering a logical range
920  * @vcpu: virtual cpu
921  * @ga: guest address, start of range
922  * @ar: access register
923  * @gpas: output argument, may be NULL
924  * @len: length of range in bytes
925  * @asce: address-space-control element to use for translation
926  * @mode: access mode
927  * @access_key: access key to match the range's storage keys against
928  *
929  * Translate a logical range to a series of guest absolute addresses,
930  * such that the concatenation of page fragments starting at each gpa make up
931  * the whole range.
932  * The translation is performed as if done by the cpu for the given @asce, @ar,
933  * @mode and state of the @vcpu.
934  * If the translation causes an exception, its program interruption code is
935  * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
936  * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
937  * a correct exception into the guest.
938  * The resulting gpas are stored into @gpas, unless it is NULL.
939  *
940  * Note: All fragments except the first one start at the beginning of a page.
941  *	 When deriving the boundaries of a fragment from a gpa, all but the last
942  *	 fragment end at the end of the page.
943  *
944  * Return:
945  * * 0		- success
946  *		  translation could not be performed, for example if guest
947  *		  memory could not be accessed
948  * * >0		- an access exception occurred. In this case the returned value
949  *		  is the program interruption code and the contents of pgm may
950  *		  be used to inject an exception into the guest.
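 *
 * Example: for a range starting at ga = 0x1ffe with len = 8, two fragments
 * result: 2 bytes at 0x1ffe and 6 bytes at the start of the next page.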
951  */
952 static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
953 			       unsigned long *gpas, unsigned long len,
954 			       const union asce asce, enum gacc_mode mode,
955 			       u8 access_key)
956 {
957 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
958 	unsigned int offset = offset_in_page(ga);
959 	unsigned int fragment_len;
960 	int lap_enabled, rc = 0;
961 	enum prot_type prot;
962 	unsigned long gpa;
963 
964 	lap_enabled = low_address_protection_enabled(vcpu, asce);
965 	while (min(PAGE_SIZE - offset, len) > 0) {
966 		fragment_len = min(PAGE_SIZE - offset, len);
967 		ga = kvm_s390_logical_to_effective(vcpu, ga);
968 		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
969 			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
970 					 PROT_TYPE_LA);
971 		if (psw_bits(*psw).dat) {
972 			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
973 			if (rc < 0)
974 				return rc;
975 		} else {
976 			gpa = kvm_s390_real_to_abs(vcpu, ga);
977 			if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
978 				rc = PGM_ADDRESSING;
979 				prot = PROT_NONE;
980 			}
981 		}
982 		if (rc)
983 			return trans_exc(vcpu, rc, ga, ar, mode, prot);
984 		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
985 					   fragment_len);
986 		if (rc)
987 			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
988 		if (gpas)
989 			*gpas++ = gpa;
990 		offset = 0;
991 		ga += fragment_len;
992 		len -= fragment_len;
993 	}
994 	return 0;
995 }
996 
997 static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
998 			     void *data, unsigned int len)
999 {
1000 	const unsigned int offset = offset_in_page(gpa);
1001 	const gfn_t gfn = gpa_to_gfn(gpa);
1002 	int rc;
1003 
1004 	if (!gfn_to_memslot(kvm, gfn))
1005 		return PGM_ADDRESSING;
1006 	if (mode == GACC_STORE)
1007 		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
1008 	else
1009 		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
1010 	return rc;
1011 }
1012 
1013 static int
1014 access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
1015 			   void *data, unsigned int len, u8 access_key)
1016 {
1017 	struct kvm_memory_slot *slot;
1018 	bool writable;
1019 	gfn_t gfn;
1020 	hva_t hva;
1021 	int rc;
1022 
1023 	gfn = gpa >> PAGE_SHIFT;
1024 	slot = gfn_to_memslot(kvm, gfn);
1025 	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
1026 
1027 	if (kvm_is_error_hva(hva))
1028 		return PGM_ADDRESSING;
1029 	/*
1030 	 * Check if it's a read-only memslot, even though that can't occur (they're unsupported).
1031 	 * Don't try to actually handle that case.
1032 	 */
1033 	if (!writable && mode == GACC_STORE)
1034 		return -EOPNOTSUPP;
1035 	hva += offset_in_page(gpa);
1036 	if (mode == GACC_STORE)
1037 		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
1038 	else
1039 		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
1040 	if (rc)
1041 		return PGM_PROTECTION;
1042 	if (mode == GACC_STORE)
1043 		mark_page_dirty_in_slot(kvm, slot, gfn);
1044 	return 0;
1045 }
1046 
1047 int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
1048 			      unsigned long len, enum gacc_mode mode, u8 access_key)
1049 {
1050 	int offset = offset_in_page(gpa);
1051 	int fragment_len;
1052 	int rc;
1053 
1054 	while (min(PAGE_SIZE - offset, len) > 0) {
1055 		fragment_len = min(PAGE_SIZE - offset, len);
1056 		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
1057 		if (rc)
1058 			return rc;
1059 		offset = 0;
1060 		len -= fragment_len;
1061 		data += fragment_len;
1062 		gpa += fragment_len;
1063 	}
1064 	return 0;
1065 }
1066 
1067 int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
1068 			  void *data, unsigned long len, enum gacc_mode mode,
1069 			  u8 access_key)
1070 {
1071 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1072 	unsigned long nr_pages, idx;
1073 	unsigned long gpa_array[2];
1074 	unsigned int fragment_len;
1075 	unsigned long *gpas;
1076 	enum prot_type prot;
1077 	int need_ipte_lock;
1078 	union asce asce;
1079 	bool try_storage_prot_override;
1080 	bool try_fetch_prot_override;
1081 	int rc;
1082 
1083 	if (!len)
1084 		return 0;
1085 	ga = kvm_s390_logical_to_effective(vcpu, ga);
1086 	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
1087 	if (rc)
1088 		return rc;
1089 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
1090 	gpas = gpa_array;
1091 	if (nr_pages > ARRAY_SIZE(gpa_array))
1092 		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
1093 	if (!gpas)
1094 		return -ENOMEM;
1095 	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
1096 	try_storage_prot_override = storage_prot_override_applicable(vcpu);
1097 	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
1098 	if (need_ipte_lock)
1099 		ipte_lock(vcpu->kvm);
1100 	/*
1101 	 * Since we do the access further down ultimately via a move instruction
1102 	 * that does key checking and returns an error in case of a protection
1103 	 * violation, we don't need to do the check during address translation.
1104 	 * Skip it by passing access key 0, which matches any storage key,
1105 	 * obviating the need for any further checks. As a result the check is
1106 	 * handled entirely in hardware on access; we only need to take care to
1107 	 * forego key protection checking if fetch protection override applies or
1108 	 * retry with the special key 9 in case of storage protection override.
1109 	 */
1110 	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
1111 	if (rc)
1112 		goto out_unlock;
1113 	for (idx = 0; idx < nr_pages; idx++) {
1114 		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
1115 		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
1116 			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
1117 					       data, fragment_len);
1118 		} else {
1119 			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
1120 							data, fragment_len, access_key);
1121 		}
1122 		if (rc == PGM_PROTECTION && try_storage_prot_override)
1123 			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
1124 							data, fragment_len, PAGE_SPO_ACC);
1125 		if (rc)
1126 			break;
1127 		len -= fragment_len;
1128 		data += fragment_len;
1129 		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
1130 	}
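	/*
	 * A store that raised an exception after at least one fragment was
	 * already written is reported as terminating; trans_exc_ending()
	 * then clears the extra TEC protection bits for that case.
	 */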
1131 	if (rc > 0) {
1132 		bool terminate = (mode == GACC_STORE) && (idx > 0);
1133 
1134 		if (rc == PGM_PROTECTION)
1135 			prot = PROT_TYPE_KEYC;
1136 		else
1137 			prot = PROT_NONE;
1138 		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
1139 	}
1140 out_unlock:
1141 	if (need_ipte_lock)
1142 		ipte_unlock(vcpu->kvm);
1143 	if (nr_pages > ARRAY_SIZE(gpa_array))
1144 		vfree(gpas);
1145 	return rc;
1146 }
1147 
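/*
 * Access guest memory addressed by guest real addresses: only prefixing is
 * applied, no DAT and no key checking. On an access exception the program
 * interruption code is stored in vcpu->arch.pgm.code and also returned.
 */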
1148 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
1149 		      void *data, unsigned long len, enum gacc_mode mode)
1150 {
1151 	unsigned int fragment_len;
1152 	unsigned long gpa;
1153 	int rc = 0;
1154 
1155 	while (len && !rc) {
1156 		gpa = kvm_s390_real_to_abs(vcpu, gra);
1157 		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
1158 		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
1159 		len -= fragment_len;
1160 		gra += fragment_len;
1161 		data += fragment_len;
1162 	}
1163 	if (rc > 0)
1164 		vcpu->arch.pgm.code = rc;
1165 	return rc;
1166 }
1167 
1168 /**
1169  * cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
1170  * @kvm: Virtual machine instance.
1171  * @gpa: Absolute guest address of the location to be changed.
1172  * @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
1173  *       non-power-of-two length will result in failure.
1174  * @old_addr: Pointer to old value. If the location at @gpa contains this value,
1175  *            the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
1176  *            *@old_addr contains the value at @gpa before the attempt to
1177  *            exchange the value.
1178  * @new: The value to place at @gpa.
1179  * @access_key: The access key to use for the guest access.
1180  * @success: output value indicating if an exchange occurred.
1181  *
1182  * Atomically exchange the value at @gpa with @new, if it contains *@old_addr.
1183  * Honors storage keys.
1184  *
1185  * Return: * 0: successful exchange
1186  *         * >0: a program interruption code indicating the reason cmpxchg could
1187  *               not be attempted
1188  *         * -EINVAL: address misaligned or len not power of two
1189  *         * -EAGAIN: transient failure (len 1 or 2)
1190  *         * -EOPNOTSUPP: read-only memslot (should never occur)
1191  */
1192 int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
1193 			       __uint128_t *old_addr, __uint128_t new,
1194 			       u8 access_key, bool *success)
1195 {
1196 	gfn_t gfn = gpa_to_gfn(gpa);
1197 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1198 	bool writable;
1199 	hva_t hva;
1200 	int ret;
1201 
1202 	if (!IS_ALIGNED(gpa, len))
1203 		return -EINVAL;
1204 
1205 	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
1206 	if (kvm_is_error_hva(hva))
1207 		return PGM_ADDRESSING;
1208 	/*
1209 	 * Check if it's a read-only memslot, even though that cannot occur
1210 	 * since those are unsupported.
1211 	 * Don't try to actually handle that case.
1212 	 */
1213 	if (!writable)
1214 		return -EOPNOTSUPP;
1215 
1216 	hva += offset_in_page(gpa);
1217 	/*
1218 	 * The cmpxchg_user_key macro depends on the type of "old", so we need
1219 	 * a case for each valid length and get some code duplication as long
1220 	 * as we don't introduce a new macro.
1221 	 */
1222 	switch (len) {
1223 	case 1: {
1224 		u8 old;
1225 
1226 		ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
1227 		*success = !ret && old == *old_addr;
1228 		*old_addr = old;
1229 		break;
1230 	}
1231 	case 2: {
1232 		u16 old;
1233 
1234 		ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
1235 		*success = !ret && old == *old_addr;
1236 		*old_addr = old;
1237 		break;
1238 	}
1239 	case 4: {
1240 		u32 old;
1241 
1242 		ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
1243 		*success = !ret && old == *old_addr;
1244 		*old_addr = old;
1245 		break;
1246 	}
1247 	case 8: {
1248 		u64 old;
1249 
1250 		ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
1251 		*success = !ret && old == *old_addr;
1252 		*old_addr = old;
1253 		break;
1254 	}
1255 	case 16: {
1256 		__uint128_t old;
1257 
1258 		ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
1259 		*success = !ret && old == *old_addr;
1260 		*old_addr = old;
1261 		break;
1262 	}
1263 	default:
1264 		return -EINVAL;
1265 	}
1266 	if (*success)
1267 		mark_page_dirty_in_slot(kvm, slot, gfn);
1268 	/*
1269 	 * Assume that the fault is caused by protection, either key protection
1270 	 * or user page write protection.
1271 	 */
1272 	if (ret == -EFAULT)
1273 		ret = PGM_PROTECTION;
1274 	return ret;
1275 }
1276 
1277 /**
1278  * guest_translate_address_with_key - translate guest logical into guest absolute address
1279  * @vcpu: virtual cpu
1280  * @gva: Guest virtual address
1281  * @ar: Access register
1282  * @gpa: Guest physical address
1283  * @mode: Translation access mode
1284  * @access_key: access key to match the storage key with
1285  *
1286  * Parameter semantics are the same as the ones from guest_translate.
1287  * The memory contents at the guest address are not changed.
1288  *
1289  * Note: The IPTE lock is not taken during this function, so the caller
1290  * has to take care of this.
1291  */
1292 int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
1293 				     unsigned long *gpa, enum gacc_mode mode,
1294 				     u8 access_key)
1295 {
1296 	union asce asce;
1297 	int rc;
1298 
1299 	gva = kvm_s390_logical_to_effective(vcpu, gva);
1300 	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
1301 	if (rc)
1302 		return rc;
1303 	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
1304 				   access_key);
1305 }
1306 
1307 /**
1308  * check_gva_range - test a range of guest virtual addresses for accessibility
1309  * @vcpu: virtual cpu
1310  * @gva: Guest virtual address
1311  * @ar: Access register
1312  * @length: Length of test range
1313  * @mode: Translation access mode
1314  * @access_key: access key to match the storage keys with
1315  */
1316 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
1317 		    unsigned long length, enum gacc_mode mode, u8 access_key)
1318 {
1319 	union asce asce;
1320 	int rc = 0;
1321 
1322 	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
1323 	if (rc)
1324 		return rc;
1325 	ipte_lock(vcpu->kvm);
1326 	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
1327 				 access_key);
1328 	ipte_unlock(vcpu->kvm);
1329 
1330 	return rc;
1331 }
1332 
1333 /**
1334  * check_gpa_range - test a range of guest physical addresses for accessibility
1335  * @kvm: virtual machine instance
1336  * @gpa: guest physical address
1337  * @length: length of test range
1338  * @mode: access mode to test, relevant for storage keys
1339  * @access_key: access key to match the storage keys with
1340  */
1341 int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
1342 		    enum gacc_mode mode, u8 access_key)
1343 {
1344 	unsigned int fragment_len;
1345 	int rc = 0;
1346 
1347 	while (length && !rc) {
1348 		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
1349 		rc = vm_check_access_key(kvm, access_key, mode, gpa);
1350 		length -= fragment_len;
1351 		gpa += fragment_len;
1352 	}
1353 	return rc;
1354 }
1355 
1356 /**
1357  * kvm_s390_check_low_addr_prot_real - check for low-address protection
1358  * @vcpu: virtual cpu
1359  * @gra: Guest real address
1360  *
1361  * Checks whether an address is subject to low-address protection and set
1362  * up vcpu->arch.pgm accordingly if necessary.
1363  *
1364  * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
1365  */
1366 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
1367 {
1368 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
1369 
1370 	if (!ctlreg0.lap || !is_low_address(gra))
1371 		return 0;
1372 	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
1373 }
1374 
1375 /**
1376  * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
1377  * @sg: pointer to the shadow guest address space structure
1378  * @saddr: faulting address in the shadow gmap
1379  * @pgt: pointer to the beginning of the page table for the given address if
1380  *	 successful (return value 0), or to the first invalid DAT entry in
1381  *	 case of exceptions (return value > 0)
1382  * @dat_protection: referenced memory is write protected
1383  * @fake: pgt references contiguous guest memory block, not a pgtable
1384  */
1385 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
1386 				  unsigned long *pgt, int *dat_protection,
1387 				  int *fake)
1388 {
1389 	struct kvm *kvm;
1390 	struct gmap *parent;
1391 	union asce asce;
1392 	union vaddress vaddr;
1393 	unsigned long ptr;
1394 	int rc;
1395 
1396 	*fake = 0;
1397 	*dat_protection = 0;
1398 	kvm = sg->private;
1399 	parent = sg->parent;
1400 	vaddr.addr = saddr;
1401 	asce.val = sg->orig_asce;
1402 	ptr = asce.origin * PAGE_SIZE;
1403 	if (asce.r) {
1404 		*fake = 1;
1405 		ptr = 0;
1406 		asce.dt = ASCE_TYPE_REGION1;
1407 	}
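	/*
	 * A real-space ASCE has no guest DAT tables to walk; by faking a
	 * region-first table starting at 0, the walk below fabricates shadow
	 * tables for the identity-mapped guest address space.
	 */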
1408 	switch (asce.dt) {
1409 	case ASCE_TYPE_REGION1:
1410 		if (vaddr.rfx01 > asce.tl && !*fake)
1411 			return PGM_REGION_FIRST_TRANS;
1412 		break;
1413 	case ASCE_TYPE_REGION2:
1414 		if (vaddr.rfx)
1415 			return PGM_ASCE_TYPE;
1416 		if (vaddr.rsx01 > asce.tl)
1417 			return PGM_REGION_SECOND_TRANS;
1418 		break;
1419 	case ASCE_TYPE_REGION3:
1420 		if (vaddr.rfx || vaddr.rsx)
1421 			return PGM_ASCE_TYPE;
1422 		if (vaddr.rtx01 > asce.tl)
1423 			return PGM_REGION_THIRD_TRANS;
1424 		break;
1425 	case ASCE_TYPE_SEGMENT:
1426 		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
1427 			return PGM_ASCE_TYPE;
1428 		if (vaddr.sx01 > asce.tl)
1429 			return PGM_SEGMENT_TRANSLATION;
1430 		break;
1431 	}
1432 
1433 	switch (asce.dt) {
1434 	case ASCE_TYPE_REGION1: {
1435 		union region1_table_entry rfte;
1436 
1437 		if (*fake) {
1438 			ptr += vaddr.rfx * _REGION1_SIZE;
1439 			rfte.val = ptr;
1440 			goto shadow_r2t;
1441 		}
1442 		*pgt = ptr + vaddr.rfx * 8;
1443 		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
1444 		if (rc)
1445 			return rc;
1446 		if (rfte.i)
1447 			return PGM_REGION_FIRST_TRANS;
1448 		if (rfte.tt != TABLE_TYPE_REGION1)
1449 			return PGM_TRANSLATION_SPEC;
1450 		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
1451 			return PGM_REGION_SECOND_TRANS;
1452 		if (sg->edat_level >= 1)
1453 			*dat_protection |= rfte.p;
1454 		ptr = rfte.rto * PAGE_SIZE;
1455 shadow_r2t:
1456 		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
1457 		if (rc)
1458 			return rc;
1459 		kvm->stat.gmap_shadow_r1_entry++;
1460 	}
1461 		fallthrough;
1462 	case ASCE_TYPE_REGION2: {
1463 		union region2_table_entry rste;
1464 
1465 		if (*fake) {
1466 			ptr += vaddr.rsx * _REGION2_SIZE;
1467 			rste.val = ptr;
1468 			goto shadow_r3t;
1469 		}
1470 		*pgt = ptr + vaddr.rsx * 8;
1471 		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
1472 		if (rc)
1473 			return rc;
1474 		if (rste.i)
1475 			return PGM_REGION_SECOND_TRANS;
1476 		if (rste.tt != TABLE_TYPE_REGION2)
1477 			return PGM_TRANSLATION_SPEC;
1478 		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
1479 			return PGM_REGION_THIRD_TRANS;
1480 		if (sg->edat_level >= 1)
1481 			*dat_protection |= rste.p;
1482 		ptr = rste.rto * PAGE_SIZE;
1483 shadow_r3t:
1484 		rste.p |= *dat_protection;
1485 		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
1486 		if (rc)
1487 			return rc;
1488 		kvm->stat.gmap_shadow_r2_entry++;
1489 	}
1490 		fallthrough;
1491 	case ASCE_TYPE_REGION3: {
1492 		union region3_table_entry rtte;
1493 
1494 		if (*fake) {
1495 			ptr += vaddr.rtx * _REGION3_SIZE;
1496 			rtte.val = ptr;
1497 			goto shadow_sgt;
1498 		}
1499 		*pgt = ptr + vaddr.rtx * 8;
1500 		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
1501 		if (rc)
1502 			return rc;
1503 		if (rtte.i)
1504 			return PGM_REGION_THIRD_TRANS;
1505 		if (rtte.tt != TABLE_TYPE_REGION3)
1506 			return PGM_TRANSLATION_SPEC;
1507 		if (rtte.cr && asce.p && sg->edat_level >= 2)
1508 			return PGM_TRANSLATION_SPEC;
1509 		if (rtte.fc && sg->edat_level >= 2) {
1510 			*dat_protection |= rtte.fc0.p;
1511 			*fake = 1;
1512 			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
1513 			rtte.val = ptr;
1514 			goto shadow_sgt;
1515 		}
1516 		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
1517 			return PGM_SEGMENT_TRANSLATION;
1518 		if (sg->edat_level >= 1)
1519 			*dat_protection |= rtte.fc0.p;
1520 		ptr = rtte.fc0.sto * PAGE_SIZE;
1521 shadow_sgt:
1522 		rtte.fc0.p |= *dat_protection;
1523 		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
1524 		if (rc)
1525 			return rc;
1526 		kvm->stat.gmap_shadow_r3_entry++;
1527 	}
1528 		fallthrough;
1529 	case ASCE_TYPE_SEGMENT: {
1530 		union segment_table_entry ste;
1531 
1532 		if (*fake) {
1533 			ptr += vaddr.sx * _SEGMENT_SIZE;
1534 			ste.val = ptr;
1535 			goto shadow_pgt;
1536 		}
1537 		*pgt = ptr + vaddr.sx * 8;
1538 		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
1539 		if (rc)
1540 			return rc;
1541 		if (ste.i)
1542 			return PGM_SEGMENT_TRANSLATION;
1543 		if (ste.tt != TABLE_TYPE_SEGMENT)
1544 			return PGM_TRANSLATION_SPEC;
1545 		if (ste.cs && asce.p)
1546 			return PGM_TRANSLATION_SPEC;
1547 		*dat_protection |= ste.fc0.p;
1548 		if (ste.fc && sg->edat_level >= 1) {
1549 			*fake = 1;
1550 			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
1551 			ste.val = ptr;
1552 			goto shadow_pgt;
1553 		}
1554 		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
1555 shadow_pgt:
1556 		ste.fc0.p |= *dat_protection;
1557 		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
1558 		if (rc)
1559 			return rc;
1560 		kvm->stat.gmap_shadow_sg_entry++;
1561 	}
1562 	}
1563 	/* Return the parent address of the page table */
1564 	*pgt = ptr;
1565 	return 0;
1566 }
1567 
1568 /**
1569  * kvm_s390_shadow_fault - handle fault on a shadow page table
1570  * @vcpu: virtual cpu
1571  * @sg: pointer to the shadow guest address space structure
1572  * @saddr: faulting address in the shadow gmap
1573  * @datptr: will contain the address of the faulting DAT table entry, or of
1574  *	    the valid leaf, plus some flags
1575  *
1576  * Returns: - 0 if the shadow fault was successfully resolved
1577  *	    - > 0 (pgm exception code) on exceptions while faulting
1578  *	    - -EAGAIN if the caller can retry immediately
1579  *	    - -EFAULT when accessing invalid guest addresses
1580  *	    - -ENOMEM if out of memory
1581  */
1582 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1583 			  unsigned long saddr, unsigned long *datptr)
1584 {
1585 	union vaddress vaddr;
1586 	union page_table_entry pte;
1587 	unsigned long pgt = 0;
1588 	int dat_protection, fake;
1589 	int rc;
1590 
1591 	mmap_read_lock(sg->mm);
1592 	/*
1593 	 * We don't want any guest-2 tables to change - so the parent
1594 	 * tables/pointers we read stay valid - unshadowing is however
1595 	 * always possible - only guest_table_lock protects us.
1596 	 */
1597 	ipte_lock(vcpu->kvm);
1598 
1599 	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
1600 	if (rc)
1601 		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
1602 					    &fake);
1603 
1604 	vaddr.addr = saddr;
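	/*
	 * For a fake page table, pgt is the absolute address of the backing
	 * block, so the pte can be synthesized directly instead of being read
	 * from guest memory.
	 */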
1605 	if (fake) {
1606 		pte.val = pgt + vaddr.px * PAGE_SIZE;
1607 		goto shadow_page;
1608 	}
1609 
1610 	switch (rc) {
1611 	case PGM_SEGMENT_TRANSLATION:
1612 	case PGM_REGION_THIRD_TRANS:
1613 	case PGM_REGION_SECOND_TRANS:
1614 	case PGM_REGION_FIRST_TRANS:
1615 		pgt |= PEI_NOT_PTE;
1616 		break;
1617 	case 0:
1618 		pgt += vaddr.px * 8;
1619 		rc = gmap_read_table(sg->parent, pgt, &pte.val);
1620 	}
1621 	if (datptr)
1622 		*datptr = pgt | dat_protection * PEI_DAT_PROT;
1623 	if (!rc && pte.i)
1624 		rc = PGM_PAGE_TRANSLATION;
1625 	if (!rc && pte.z)
1626 		rc = PGM_TRANSLATION_SPEC;
1627 shadow_page:
1628 	pte.p |= dat_protection;
1629 	if (!rc)
1630 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
1631 	vcpu->kvm->stat.gmap_shadow_pg_entry++;
1632 	ipte_unlock(vcpu->kvm);
1633 	mmap_read_unlock(sg->mm);
1634 	return rc;
1635 }
1636