// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>

#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long : 2;
		unsigned long g : 1;  /* Subspace Group Control */
		unsigned long p : 1;  /* Private Space Control */
		unsigned long s : 1;  /* Storage-Alteration-Event Control */
		unsigned long x : 1;  /* Space-Switch-Event Control */
		unsigned long r : 1;  /* Real-Space Control */
		unsigned long : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};

union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1;  /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i : 1;  /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1;  /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i : 1;  /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1;  /* DAT-Protection Bit */
	unsigned long : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i : 1;  /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long : 14;
	unsigned long av : 1;  /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1;   /* Fetch-Protection Bit */
	unsigned long fc : 1;  /* Format-Control */
	unsigned long p : 1;   /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long : 2;
	unsigned long i : 1;   /* Region-Invalid Bit */
	unsigned long cr : 1;  /* Common-Region Bit */
	unsigned long tt : 2;  /* Table-Type Bits */
	unsigned long : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1;  /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};
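/*
 * Illustrative note (not from the original source): the fc bit selects
 * between the two entry layouts above. With fc=0 the entry points at the
 * next-lower-level table; with fc=1 (EDAT) it maps a large frame directly:
 * the 33-bit rfaa of a region-third entry covers a 2G frame, and the
 * 44-bit sfaa of the segment entry below covers a 1M frame.
 */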
struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1;   /* Format-Control */
	unsigned long p : 1;    /* DAT-Protection Bit */
	unsigned long : 3;
	unsigned long i : 1;    /* Segment-Invalid Bit */
	unsigned long cs : 1;   /* Common-Segment Bit */
	unsigned long tt : 2;   /* Table-Type Bits */
	unsigned long : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long : 3;
	unsigned long av : 1;  /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1;   /* Fetch-Protection Bit */
	unsigned long fc : 1;  /* Format-Control */
	unsigned long p : 1;   /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long : 2;
	unsigned long i : 1;   /* Segment-Invalid Bit */
	unsigned long cs : 1;  /* Common-Segment Bit */
	unsigned long tt : 2;  /* Table-Type Bits */
	unsigned long : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1;  /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};

enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z : 1;   /* Zero Bit */
		unsigned long i : 1;   /* Page-Invalid Bit */
		unsigned long p : 1;   /* DAT-Protection Bit */
		unsigned long iep : 1; /* Instruction-Execution-Protection */
		unsigned long : 8;
	};
};

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx : 11;
		unsigned long px : 8;
		unsigned long bx : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long : 9;
		unsigned long rsx01 : 2;
		unsigned long : 9;
		unsigned long rtx01 : 2;
		unsigned long : 9;
		unsigned long sx01 : 2;
		unsigned long : 29;
	};
};
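/*
 * Illustrative sketch (not from the original source): how the bitfields
 * above decompose a 64-bit virtual address. Equivalently, by hand:
 *
 *	rfx == (addr >> 53) & 0x7ff	// region-first index, 11 bits
 *	rsx == (addr >> 42) & 0x7ff	// region-second index, 11 bits
 *	rtx == (addr >> 31) & 0x7ff	// region-third index, 11 bits
 *	sx  == (addr >> 20) & 0x7ff	// segment index, 11 bits
 *	px  == (addr >> 12) & 0xff	// page index, 8 bits
 *	bx  == addr & 0xfff		// byte index, 12 bits
 */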
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members exist so that
 * the frame address of a region, segment or page table entry can be
 * assigned directly.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p : 1;
		u32 alesn : 8;
		u32 alen : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32 : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i : 1;      /* ALEN-Invalid Bit */
	unsigned long : 5;
	unsigned long fo : 1;     /* Fetch-Only Bit */
	unsigned long p : 1;      /* Private Bit */
	unsigned long alesn : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
	unsigned long : 32;
	unsigned long : 1;
	unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i : 1;    /* ASX-Invalid Bit */
	unsigned long ato : 29; /* Authority-Table Origin */
	unsigned long : 1;
	unsigned long b : 1;    /* Base-Space Bit */
	unsigned long ax : 16;  /* Authorization Index */
	unsigned long atl : 12; /* Authority-Table Length */
	unsigned long : 2;
	unsigned long ca : 1;   /* Controlled-ASN Bit */
	unsigned long ra : 1;   /* Reusable-ASN Bit */
	unsigned long asce : 64; /* Address-Space-Control Element */
	unsigned long ald : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};

int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
		return rc;
	}
	return vcpu->kvm->arch.ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}

static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}

void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
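/*
 * Usage sketch (illustrative, not from the original source): callers
 * bracket multi-access guest operations with the IPTE lock so the guest
 * cannot invalidate its DAT tables in between, as access_guest_with_key()
 * below does:
 *
 *	ipte_lock(vcpu);
 *	rc = guest_range_to_gpas(...);	// translate, then access
 *	ipte_unlock(vcpu);
 *
 * With ECA_SII the lock state lives in the SCA's ipte_control word
 * (presumably so the SIE interpretation of IPTE can honor it); otherwise
 * a plain mutex-protected counter in kvm->arch is used.
 */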
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
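/*
 * Illustrative note (not from the original source): the special ALET
 * values handled early in ar_translation() above map to the default
 * address spaces:
 *
 *	alet.val == 0 (or ar == 0) -> primary ASCE   (CR1)
 *	alet.val == 1              -> secondary ASCE (CR7)
 *
 * Everything else takes the full access-register translation path: the
 * ALD is located via CR5 (alet.p set) or CR2, then the ALE and ASTE are
 * read, with an extended-authority check against the authority table for
 * ale.p == 1.
 */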
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi : 2;   /* Access Exception Fetch/Store Indication */
	unsigned long : 2;
	unsigned long b56 : 1;
	unsigned long : 3;
	unsigned long b60 : 1;
	unsigned long b61 : 1;
	unsigned long as : 2;    /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
};

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     u8 ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_IEP:
			tec->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			tec->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			tec->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
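/*
 * Illustrative note (not from the original source): the ASCE chosen by
 * get_vcpu_asce() above, by PSW address-space control:
 *
 *	primary   -> CR1
 *	secondary -> CR7
 *	home      -> CR13
 *	AR mode   -> result of ar_translation()
 *
 * Instruction fetches (GACC_IFETCH) are performed from the primary space
 * unless the home space is active, hence the psw.as override.
 */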
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
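		/*
		 * Illustrative note (not from the original source): with
		 * EDAT-2 a format-1 region-third entry maps a 2G region
		 * frame, so the walk stops here: rfaa supplies bits 0-32
		 * of the absolute address, while the remaining low bits
		 * come unchanged from the virtual address (raddr was
		 * initialised from gva).
		 */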
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}
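/*
 * Illustrative examples (not from the original source) for the helper
 * below; fetch protection override only ever covers the first 2K:
 *
 *	fetch_prot_override_applies(0, 2048)  -> true  (covers 0..2047)
 *	fetch_prot_override_applies(2047, 2)  -> false (crosses 2048)
 */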
static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}
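/*
 * Illustrative summary (not from the original source) of the checks in
 * vcpu_check_access_key() above:
 *
 *	access key 0                  -> allow
 *	access key == ACC bits        -> allow
 *	fetch && fetch protection off -> allow
 *	fetch protection override     -> allow (first 2K, if applicable)
 *	storage protection override   -> allow (ACC == 9, if enabled)
 *	otherwise                     -> PGM_PROTECTION
 */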
/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa make up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 *	 When deriving the boundaries of a fragment from a gpa, all but the last
 *	 fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, gpa))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}
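/*
 * Illustrative example (not from the original source): with 4K pages, a
 * range of len = 5000 starting at ga = 0x1ffe is split by
 * guest_range_to_gpas() into fragments of 2, 4096 and 902 bytes, so three
 * gpas are stored.
 */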
static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}
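/*
 * Illustrative note (not from the original source): access_guest_page()
 * above performs no storage-key checking, while
 * access_guest_page_with_key() below copies via
 * copy_to_user_key()/copy_from_user_key(), so the key check happens in
 * hardware as part of the copy itself (see the comment in
 * access_guest_with_key()).
 */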
static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a ro memslot, even though that can't occur (they're
	 * unsupported). Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return 0;
}

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access; we only need to take care to
	 * forego key protection checking if fetch protection override applies,
	 * or to retry with the special key 9 in case of storage protection
	 * override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0)
		rc = trans_exc(vcpu, rc, ga, ar, mode, prot);
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	return rc;
}
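/*
 * Usage sketch (illustrative, not from the original source; assumes the
 * read_guest()/write_guest() inline wrappers from gaccess.h, which funnel
 * into access_guest_with_key()):
 *
 *	u64 val;
 *	int rc = read_guest(vcpu, ga, ar, &val, sizeof(val));
 *
 *	if (rc > 0)
 *		// inject the program exception described by
 *		// vcpu->arch.pgm into the guest
 *	else if (rc < 0)
 *		// host-side error, e.g. -EFAULT
 */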
/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}
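/*
 * Illustrative note (not from the original source): low-address protection
 * guards the two architected low ranges, which is exactly what
 * is_low_address() encodes as (ga & ~0x11fful) == 0:
 *
 *	0x0000..0x01ff -> protected
 *	0x1000..0x11ff -> protected
 *	anything else  -> not protected
 */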
/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}
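	/*
	 * Illustrative note (not from the original source): the walk below
	 * mirrors guest_translate(), but besides reading each guest table
	 * entry it also calls gmap_shadow_*() to build the corresponding
	 * shadow table, ORing the accumulated dat_protection into every
	 * shadowed entry so that write protection propagates down to the
	 * leaf.
	 */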
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
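/*
 * Illustrative note (not from the original source): on success, *pgt holds
 * the guest-2 address of the page table (or of the contiguous memory block
 * when *fake is set) backing saddr; kvm_s390_shadow_fault() below uses it
 * to pick up the final pte.
 */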
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	mmap_read_unlock(sg->mm);
	return rc;
}