// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64-specific extensions to memstress.c.
 *
 * Copyright (C) 2022, Google, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "vmx.h"

void memstress_l2_guest_code(uint64_t vcpu_id)
{
	memstress_guest_code(vcpu_id);
	vmcall();
}

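/*
 * Assembly trampoline for entering L2. L1 places vcpu_id at the top of
 * the L2 stack, so the trampoline loads it from (%rsp) into %rdi, the
 * first argument register in the x86_64 SysV ABI, before calling the C
 * entry point. The ud2 should be unreachable: memstress_l2_guest_code()
 * ends with a vmcall() that exits to L1, which never resumes L2.
 */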
extern char memstress_l2_guest_entry[];
__asm__(
"memstress_l2_guest_entry:"
"	mov (%rsp), %rdi;"
"	call memstress_l2_guest_code;"
"	ud2;"
);

static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));
	GUEST_ASSERT(ept_1g_pages_supported());

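	/*
	 * Pass vcpu_id to L2 via the top of its stack;
	 * memstress_l2_guest_entry loads it from (%rsp) into %rdi.
	 */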
	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);

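	/*
	 * Run L2 until the vmcall() at the end of memstress_l2_guest_code();
	 * any other exit reason is a failure.
	 */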
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}

uint64_t memstress_nested_pages(int nr_vcpus)
{
	/*
	 * 513 page tables (one PML4 plus the 512 PDPTs it points to) is
	 * enough to identity-map 256 TiB of L2 with 1G pages and 4-level
	 * paging: each PDPT maps 512 GiB using 1G entries. Reserve a few
	 * more pages per-vCPU for data structures such as the VMCS.
	 */
	return 513 + 10 * nr_vcpus;
}

void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
	uint64_t start, end;

	prepare_eptp(vmx, vm, 0);

	/*
	 * Identity map the first 4G and the test region with 1G pages so that
	 * KVM can shadow the EPT12 with the maximum huge page size supported
	 * by the backing source.
	 */
	nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);

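	/*
	 * Round the test region out to 1 GiB boundaries so that it can be
	 * covered entirely by the 1G mappings created below.
	 */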
	start = align_down(memstress_args.gpa, PG_SIZE_1G);
	end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
	nested_identity_map_1g(vmx, vm, start, end - start);
}

void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
	struct vmx_pages *vmx, *vmx0 = NULL;
	struct kvm_regs regs;
	vm_vaddr_t vmx_gva;
	int vcpu_id;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
	TEST_REQUIRE(kvm_cpu_has_ept());

	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		vmx = vcpu_alloc_vmx(vm, &vmx_gva);

		if (vcpu_id == 0) {
			memstress_setup_ept(vmx, vm);
			vmx0 = vmx;
		} else {
			/* Share the same EPT table across all vCPUs. */
			vmx->eptp = vmx0->eptp;
			vmx->eptp_hva = vmx0->eptp_hva;
			vmx->eptp_gpa = vmx0->eptp_gpa;
		}

		/*
		 * Override the vCPU to run memstress_l1_guest_code() which
		 * will bounce it into L2 before calling
		 * memstress_guest_code().
		 */
		vcpu_regs_get(vcpus[vcpu_id], &regs);
		regs.rip = (unsigned long) memstress_l1_guest_code;
		vcpu_regs_set(vcpus[vcpu_id], &regs);
		vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
	}
}
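/*
 * Usage sketch (an assumption about the common memstress code, not part
 * of this file): memstress_setup_nested() is expected to be called after
 * the vCPUs have been created, e.g.:
 *
 *	if (memstress_args.nested)
 *		memstress_setup_nested(vm, nr_vcpus, vcpus);
 */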