// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

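/*
 * Flush G-stage (guest-physical) TLB entries for a GPA range of the given
 * VMID on the local host CPU. HFENCE.GVMA (and HINVAL.GVMA from Svinval)
 * take the guest physical address right-shifted by 2 in rs1, hence the
 * "pos >> 2" below. If the range spans more than PTRS_PER_PTE entries,
 * flush everything for the VMID instead of iterating page-by-page. When
 * Svinval is available, the loop uses HINVAL.GVMA bracketed by
 * SFENCE.W.INVAL and SFENCE.INVAL.IR.
 */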
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

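/* Flush all G-stage TLB entries for the given VMID on the local host CPU. */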
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

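/*
 * Flush G-stage TLB entries for a GPA range irrespective of VMID
 * (rs2 = zero) on the local host CPU, falling back to a full flush
 * for large ranges.
 */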
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

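/*
 * Flush VS-stage (guest-virtual) TLB entries for a GVA range of the given
 * ASID under the given VMID on the local host CPU. HFENCE.VVMA operates on
 * the VMID currently programmed in hgatp, so the target VMID is temporarily
 * swapped into CSR_HGATP and the old value restored afterwards.
 */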
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

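/* Flush all VS-stage TLB entries for the given ASID under the given VMID. */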
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}

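/*
 * Flush VS-stage TLB entries for a GVA range under the given VMID,
 * irrespective of ASID (rs2 = zero), on the local host CPU.
 */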
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID across all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest that ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

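/*
 * The *_process() handlers below run on the target VCPU when the
 * corresponding KVM request (queued by make_xfence_request() further down)
 * is serviced for that VCPU.
 */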
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

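/*
 * Each VCPU carries a small fixed-size ring of pending hfence requests
 * (vcpu->arch.hfence_queue) protected by hfence_lock. A slot with
 * type == 0 (KVM_RISCV_HFENCE_UNKNOWN) is treated as empty: dequeue clears
 * the type after copying an entry out, and enqueue refuses to overwrite a
 * slot that is still in use.
 */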
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

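/*
 * Handler for KVM_REQ_HFENCE: drain this VCPU's hfence queue and perform
 * each requested flush locally using the VM's current VMID.
 */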
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}

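/*
 * Send a fence request to a set of VCPUs. The hbase/hmask pair selects
 * targets the same way as an SBI hart mask: hbase == -1UL means all VCPUs,
 * otherwise bit N of hmask selects VCPU id (hbase + N). If per-VCPU hfence
 * data is supplied, it is enqueued on each target; when a target's queue is
 * full, the request is downgraded to the more conservative fallback_req.
 */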
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

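/*
 * The helpers below issue remote fences to a set of VCPUs: they queue the
 * appropriate KVM request (with optional per-VCPU hfence data) and let the
 * *_process() handlers above perform the actual local flushes.
 */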
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}