spte.c: d6b87f256591cf6be78825db6a09a5218666e539 (old) vs. e7b7bdea77f3277fe49f714c983d0f38f7cb0d86 (new)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Kernel-based Virtual Machine driver for Linux
  *
  * Macros and functions to access KVM PTEs (also known as SPTEs)
  *
  * Copyright (C) 2006 Qumranet, Inc.
  * Copyright 2020 Red Hat, Inc. and/or its affiliates.
  */

 #include <linux/kvm_host.h>
 #include "mmu.h"
 #include "mmu_internal.h"
 #include "x86.h"
 #include "spte.h"

 #include <asm/e820/api.h>
+#include <asm/vmx.h>

 static bool __read_mostly enable_mmio_caching = true;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

 u64 __read_mostly shadow_nx_mask;
 u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 u64 __read_mostly shadow_user_mask;
 u64 __read_mostly shadow_accessed_mask;

--- 249 unchanged lines hidden ---

 	WARN_ON((mmio_value & mmio_mask) != mmio_value);
 	shadow_mmio_value = mmio_value;
 	shadow_mmio_mask = mmio_mask;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

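Note: the visible tail of kvm_mmu_set_mmio_spte_mask() caches a (value, mask, access) triple, and the WARN_ON enforces that the MMIO magic value only uses bits that are also set in the mask. A minimal standalone sketch of that contract, with made-up constants rather than KVM's real ones:

        /*
         * Standalone illustration of the value-within-mask contract enforced
         * by the WARN_ON above; the numbers are illustrative only.
         */
        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint64_t mmio_value, mmio_mask, mmio_access;

        static void set_mmio_spte_mask(uint64_t value, uint64_t mask, uint64_t access)
        {
                /* Every bit set in 'value' must also be set in 'mask'. */
                assert((value & mask) == value);
                mmio_value = value;
                mmio_mask = mask;
                mmio_access = access;
        }

        int main(void)
        {
                set_mmio_spte_mask(0x6, 0x7, 0);        /* OK: 110b is within 111b */
                /* set_mmio_spte_mask(0x8, 0x7, 0); would trip the assertion. */
                printf("value=%#llx mask=%#llx access=%#llx\n",
                       (unsigned long long)mmio_value,
                       (unsigned long long)mmio_mask,
                       (unsigned long long)mmio_access);
                return 0;
        }

Both callers seen later in this diff satisfy the contract: the EPT misconfiguration value (bits 2:0 = 110b) sits inside the RWX mask (111b), and the reserved-PA caller passes the same value as the mask.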
-/*
- * Sets the shadow PTE masks used by the MMU.
- *
- * Assumptions:
- *  - Setting either @accessed_mask or @dirty_mask requires setting both
- *  - At least one of @accessed_mask or @acc_track_mask must be set
- */
-void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-			   u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
-			   u64 acc_track_mask, u64 me_mask)
-{
-	BUG_ON(!dirty_mask != !accessed_mask);
-	BUG_ON(!accessed_mask && !acc_track_mask);
-	BUG_ON(acc_track_mask & SPTE_TDP_AD_MASK);
-
-	shadow_user_mask = user_mask;
-	shadow_accessed_mask = accessed_mask;
-	shadow_dirty_mask = dirty_mask;
-	shadow_nx_mask = nx_mask;
-	shadow_x_mask = x_mask;
-	shadow_present_mask = p_mask;
-	shadow_acc_track_mask = acc_track_mask;
-	shadow_me_mask = me_mask;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
+{
+	shadow_user_mask = VMX_EPT_READABLE_MASK;
+	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
+	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
+	shadow_nx_mask = 0ull;
+	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
+	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
+	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
+	shadow_me_mask = 0ull;
+
+	/*
+	 * EPT Misconfigurations are generated if the value of bits 2:0
+	 * of an EPT paging-structure entry is 110b (write/execute).
+	 */
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
+				   VMX_EPT_RWX_MASK, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
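The removed helper's first BUG_ON encodes the "both or neither" assumption from its comment: !x collapses a mask to 0 or 1, so !dirty_mask != !accessed_mask is true exactly when one of the two masks is zero and the other is not. A tiny standalone check of the idiom, using illustrative bit positions only:

        #include <assert.h>
        #include <stdint.h>

        /* "Both or neither": fails only when exactly one of the masks is zero. */
        static void check_ad_pairing(uint64_t accessed_mask, uint64_t dirty_mask)
        {
                assert(!(!dirty_mask != !accessed_mask));
        }

        int main(void)
        {
                check_ad_pairing(0, 0);                    /* neither set: OK */
                check_ad_pairing(1ull << 5, 1ull << 6);    /* both set: OK    */
                /* check_ad_pairing(1ull << 5, 0); would trip the assertion. */
                return 0;
        }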
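On the other side of the hunk, kvm_mmu_set_ept_masks() hard-codes the EPT bit layout and programs the MMIO mask so that bits 2:0 of an MMIO SPTE read 110b (write/execute, not readable), which the in-code comment identifies as an EPT misconfiguration. The vmx.c side of the change is not part of this diff; a plausible call site, assuming the usual VMX capability helpers (enable_ept, enable_ept_ad_bits, cpu_has_vmx_ept_execute_only()), is sketched below — it is a hypothetical fragment, not the actual vmx.c change:

        /*
         * Hypothetical caller, e.g. from VMX hardware setup; enable_ept,
         * enable_ept_ad_bits and cpu_has_vmx_ept_execute_only() are
         * assumptions and are not shown in this diff.
         */
        static void example_setup_ept_masks(void)
        {
                if (enable_ept)
                        kvm_mmu_set_ept_masks(enable_ept_ad_bits,
                                              cpu_has_vmx_ept_execute_only());
        }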

 void kvm_mmu_reset_all_pte_masks(void)
 {
 	u8 low_phys_bits;
 	u64 mask;

-	shadow_user_mask = 0;
-	shadow_accessed_mask = 0;
-	shadow_dirty_mask = 0;
-	shadow_nx_mask = 0;
-	shadow_x_mask = 0;
-	shadow_present_mask = 0;
-	shadow_acc_track_mask = 0;
-
 	shadow_phys_bits = kvm_get_shadow_phys_bits();

 	/*
 	 * If the CPU has 46 or less physical address bits, then set an
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 *
 	 * Some Intel CPUs address the L1 cache using more PA bits than are

--- 10 unchanged lines hidden ---

 			  - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
 	}

 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

+	shadow_user_mask = PT_USER_MASK;
+	shadow_accessed_mask = PT_ACCESSED_MASK;
+	shadow_dirty_mask = PT_DIRTY_MASK;
+	shadow_nx_mask = PT64_NX_MASK;
+	shadow_x_mask = 0;
+	shadow_present_mask = PT_PRESENT_MASK;
+	shadow_acc_track_mask = 0;
+	shadow_me_mask = sme_me_mask;
+
 	/*
 	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
 	 * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
 	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
 	 * 52-bit physical addresses then there are no reserved PA bits in the
 	 * PTEs and so the reserved PA approach must be disabled.
 	 */
 	if (shadow_phys_bits < 52)
 		mask = BIT_ULL(51) | PT_PRESENT_MASK;
 	else
 		mask = 0;

 	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
-
-	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			      PT_DIRTY_MASK, PT64_NX_MASK, 0,
-			      PT_PRESENT_MASK, 0, sme_me_mask);
 }
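A worked instance of the L1TF mask math in kvm_mmu_reset_all_pte_masks() may help. Assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN is 5 (its value in spte.h at the time) and a CPU whose boot_cpu_data.x86_cache_bits is 46, the reserved mask covers PA bits 41..45 and the lower-GFN mask covers bits 12..40. A small userspace model of just that arithmetic (a sketch; the surrounding conditionals live in the hidden lines above):

        #include <stdint.h>
        #include <stdio.h>

        #define GENMASK_ULL(h, l) (((~0ull) << (l)) & (~0ull >> (63 - (h))))
        #define PAGE_SHIFT 12
        #define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5    /* assumed, from spte.h */

        int main(void)
        {
                unsigned int cache_bits = 46;           /* example CPU */
                unsigned int low_phys_bits =
                        cache_bits - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

                /* rsvd_bits(lo, hi) sets bits lo..hi: here bits 41..45. */
                uint64_t nonpresent_or_rsvd =
                        GENMASK_ULL(cache_bits - 1, low_phys_bits);
                /* Bits PAGE_SHIFT..low_phys_bits-1, i.e. 12..40 here. */
                uint64_t lower_gfn_mask =
                        GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

                printf("low_phys_bits=%u rsvd=%#llx lower_gfn=%#llx\n",
                       low_phys_bits,
                       (unsigned long long)nonpresent_or_rsvd,
                       (unsigned long long)lower_gfn_mask);
                return 0;
        }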
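Similarly, the reserved-PA MMIO trick at the end of the function only works while bit 51 is actually a reserved physical-address bit. A small model of the mask selection, assuming PT_PRESENT_MASK is bit 0 of a legacy/x86-64 PTE:

        #include <stdint.h>
        #include <stdio.h>

        #define BIT_ULL(n)      (1ull << (n))
        #define PT_PRESENT_MASK BIT_ULL(0)      /* assumed: PTE present bit */

        static uint64_t mmio_mask_for(unsigned int shadow_phys_bits)
        {
                /*
                 * Bit 51 is reserved only while the CPU supports fewer than
                 * 52 PA bits; with 52 bits the trick must be disabled.
                 */
                if (shadow_phys_bits < 52)
                        return BIT_ULL(51) | PT_PRESENT_MASK;
                return 0;
        }

        int main(void)
        {
                printf("48-bit PA: mask=%#llx\n",
                       (unsigned long long)mmio_mask_for(48));
                printf("52-bit PA: mask=%#llx\n",
                       (unsigned long long)mmio_mask_for(52));
                return 0;
        }

On a 48-bit-PA host the resulting MMIO mask/value is 0x8000000000001, i.e. a present SPTE with a reserved PA bit set, so any access faults with PFEC.RSVD=1 and KVM can recognize it as MMIO; with 52 PA bits the mask is 0 and this form of MMIO caching is effectively disabled.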