/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled three times, once per pte format
 * (64-bit, 32-bit, and EPT).
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
	#error Invalid PTTYPE value
#endif

#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
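/*
 * For illustration: on a 4-level 64-bit walk, walker->level starts at 4
 * (the root) and counts down to the level of the final gpte, and each
 * per-level array below is indexed by level - 1.  A walk that ends in a
 * 4K page therefore fills:
 *
 *	level 4: table_gfn[3] = gfn of the PML4 table, ptes[3] = PML4E
 *	level 3: table_gfn[2] = gfn of the PDPT,       ptes[2] = PDPTE
 *	level 2: table_gfn[1] = gfn of the PD,         ptes[1] = PDE
 *	level 1: table_gfn[0] = gfn of the PT,         ptes[0] = PTE
 */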
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
	return false;
#else
	return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
	if (likely(npages == 1)) {
		table = kmap_atomic(page);
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		kunmap_atomic(table);

		kvm_release_page_dirty(page);
	} else {
		struct vm_area_struct *vma;
		unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
		unsigned long pfn;
		unsigned long paddr;

		mmap_read_lock(current->mm);
		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
			mmap_read_unlock(current->mm);
			return -EFAULT;
		}
		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		paddr = pfn << PAGE_SHIFT;
		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
		if (!table) {
			mmap_read_unlock(current->mm);
			return -EFAULT;
		}
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		memunmap(table);
		mmap_read_unlock(current->mm);
	}

	return (ret != orig_pte);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}
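/*
 * A worked example for FNAME(protect_clean_gpte) above, assuming the
 * non-EPT layout (PT_WRITABLE_SHIFT == 1, PT_GUEST_DIRTY_SHIFT == 6):
 * for a clean gpte, bit 6 is clear, the shifted value contributes
 * nothing, and mask == ~ACC_WRITE_MASK strips write access; for a dirty
 * gpte, bit 6 shifts down onto bit 1 == PT_WRITABLE_MASK and the gpte's
 * write permission survives "*access &= mask".
 */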
/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors.  Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported.  Here, we repurpose ACC_USER_MASK
 * to signify readability, since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     gpa_t addr, int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}
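/*
 * Example for FNAME(gpte_pkeys) above: protection keys exist only in the
 * 64-bit format, where the pkey occupies gpte bits 62:59; a gpte with
 * those bits equal to 0b0101 yields pkey 5, while the 32-bit and EPT
 * instantiations always return 0.
 */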
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_guest_pgd(vcpu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (for an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gpa == UNMAPPED_GVA))
			return 0;

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					&walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__get_user(pte, ptep_user)))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like the other
		 * permission bits.
		 */
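		/*
		 * Concretely: walk_nx_mask is bit 63, so a gpte with NX=1
		 * has bit 63 clear in (pte ^ walk_nx_mask).  One
		 * non-executable level thus clears bit 63 in the
		 * accumulated pte_access for the whole walk, exactly as a
		 * clear W or U bit would; the mask is XORed back before
		 * FNAME(gpte_access) converts the result to ACC_* flags.
		 */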
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
	walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bit support, accessed_dirty will
		 * always be clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected.  The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits.  The exit_qualification might
	 *         be out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [8:7] - Derived from [8:7] of the real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
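	/*
	 * A sketch of the resulting value, assuming the VMX layout where
	 * EPT_VIOLATION_ACC_READ/WRITE/INSTR are bits 0/1/2: a write fault
	 * through gptes that allowed read+write accumulates pte_access ==
	 * 0x3, so bits [5:3] become 0x3 << 3 and bit 1 is set from
	 * write_fault, giving 0x1a plus whatever was preserved in [8:7].
	 */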
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * We call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
		     true, true);

	kvm_release_pfn_clean(pfn);
	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PG_LEVEL_4K) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				&curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
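/*
 * Illustration of the 4K case above, assuming PTE_PREFETCH_NUM == 8 and
 * 8-byte gptes: mask is 8 * 8 - 1 == 63, so base_gpa is pte_gpa rounded
 * down to a 64-byte boundary, and a single atomic read refills the whole
 * prefetch window while re-checking the gpte of interest at "index".
 */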
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = sptep_to_sp(sptep);

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			struct guest_walker *gw, u32 error_code,
			int max_level, kvm_pfn_t pfn, bool map_writable,
			bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write_fault = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, level, req_level, ret;
	gfn_t base_gfn = gw->gfn;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level - 1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write-
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}
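	/*
	 * The loop above mirrors the guest's own paging levels with
	 * indirect shadow pages; the loop below continues past gw->level
	 * with direct shadow pages, which is what lets a huge guest gpte
	 * be shadowed by smaller sptes after hugepage adjustment.
	 */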
	level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);

		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
						   &pfn, &level);

		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
					      it.level - 1, true, direct_access);
			link_shadow_page(vcpu, it.sptep, sp);
			if (huge_page_disallowed && req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			   it.level, base_gfn, pfn, prefault, map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;

out_gpte_changed:
	return RET_PF_RETRY;
}

/*
 * Checks whether the faulting gfn is mapped by, or is itself, one of the
 * page tables of its own current mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large
 * page to map a writable gfn that is itself in use as a page table, KVM
 * must map it with a small page instead: establishing the shadow page
 * table creates a new shadow page that write-protects the gfn, which
 * precludes using a large page.  Detecting this early avoids an
 * unnecessary #PF and emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently used as one of its own page tables.
 *
 * Note: the PDPT page table is not checked for PAE-32 bit guests.  That
 * is ok, since the PDPT is always shadowed; this means we cannot use a
 * large page to map a gfn that is used as a PDPT anyway.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, bool user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
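/*
 * A worked example of the detection above: for a 2M guest mapping,
 * KVM_PAGES_PER_HPAGE() is 512, so "mask" clears the low 9 gfn bits.
 * If walker->gfn and some table_gfn differ only in those low bits, the
 * fault writes into the 2M region that holds one of its own page tables
 * (self_changed); if they are identical, the written gfn *is* a page
 * table of this very walk (write_fault_to_shadow_pgtable).
 */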
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 * a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
			     bool prefault)
{
	bool write_fault = error_code & PFERR_WRITE_MASK;
	bool user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	hva_t hva;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;
	int max_level;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return RET_PF_EMULATE;
	}

	r = mmu_topup_memory_caches(vcpu, true);
	if (r)
		return r;

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (is_self_change_mapping)
		max_level = PG_LEVEL_4K;
	else
		max_level = walker.level;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
			 write_fault, &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into the mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page so that the
		 * kernel can write to it when cr0.wp=0, then we should
		 * prevent the kernel from executing it if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}
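	/*
	 * Note on the ordering: mmu_notifier_seq was snapshotted (with the
	 * smp_rmb() above) before the pfn was resolved.  If an mmu notifier
	 * invalidation runs in the window before mmu_lock is taken below,
	 * mmu_notifier_retry_hva() sees a changed sequence count and the
	 * fault is retried instead of installing a stale pfn.
	 */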
	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);
	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;
	r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
			 map_writable, prefault);
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PG_LEVEL_4K);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	u64 old_spte;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here; rmap_can_add() will
	 * help us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu, true);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	write_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = sptep_to_sp(sptep);
		old_spte = *sptep;
		if (is_last_spte(old_spte, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
			if (is_shadow_present_pte(old_spte))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	write_unlock(&vcpu->kvm->mmu_lock);
}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
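/*
 * Usage sketch (a hypothetical caller, for illustration only):
 * translating a guest user-mode write, with the PFERR_* bits describing
 * the access:
 *
 *	struct x86_exception exception;
 *	gpa_t gpa = FNAME(gva_to_gpa)(vcpu, gva,
 *				      PFERR_WRITE_MASK | PFERR_USER_MASK,
 *				      &exception);
 *	if (gpa == UNMAPPED_GVA)
 *		... inject "exception" into the guest ...
 */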
#if PTTYPE != PTTYPE_EPT
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

#ifndef CONFIG_X86_64
	/* A 64-bit GVA should be impossible on 32-bit KVM. */
	WARN_ON_ONCE(vaddr >> 32);
#endif

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 * We should flush all tlbs if a spte is dropped, even though the guest is
 * responsible for it.  If we don't, and kvm_mmu_notifier_invalidate_page
 * or kvm_mmu_notifier_invalidate_range_start detects that the mapped page
 * isn't used by the guest, no tlbs are flushed, and the guest could access
 * the freed pages.  We increase kvm->tlbs_dirty to delay the tlb flush in
 * this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;
	int set_spte_ret = 0;

	/* direct kvm_mmu_page cannot be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update the spte before increasing tlbs_dirty to
			 * make sure no tlb flush is lost after the spte is
			 * zapped; see the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
				   &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * The same as above, where we are doing
			 * prefetch_invalid_gpte().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
					 pte_access, PG_LEVEL_4K,
					 gfn, spte_to_pfn(sp->spt[i]),
					 true, false, host_writable);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
		kvm_flush_remote_tlbs(vcpu->kvm);

	return nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY
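/*
 * For reference, this template is instantiated from mmu.c by defining
 * PTTYPE and re-including the file, roughly as follows (paraphrased; see
 * mmu.c for the authoritative sequence):
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */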