// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>

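/*
 * Global VMID allocator state: vmid_version is the current allocation
 * generation and vmid_next is the next VMID to hand out (both updated
 * under vmid_lock), while vmid_bits is the hardware VMID width detected
 * at boot.
 */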
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
static unsigned long vmid_bits __ro_after_init;
static DEFINE_SPINLOCK(vmid_lock);

void __init kvm_riscv_gstage_vmid_detect(void)
{
	unsigned long old;

	/* Figure-out number of VMID bits in HW */
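	/*
	 * Write all 1s to the VMID field of hgatp: only the implemented
	 * bits stick, so reading the field back and taking fls_long()
	 * gives the number of VMID bits supported by this hart.
	 */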
	old = csr_read(CSR_HGATP);
	csr_write(CSR_HGATP, old | HGATP_VMID);
	vmid_bits = csr_read(CSR_HGATP);
	vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
	vmid_bits = fls_long(vmid_bits);
	csr_write(CSR_HGATP, old);

	/* We polluted local TLB so flush all guest TLB */
	kvm_riscv_local_hfence_gvma_all();

	/* We don't use VMID bits if they are not sufficient */
	if ((1UL << vmid_bits) < num_possible_cpus())
		vmid_bits = 0;
}

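/* Number of usable VMID bits (zero when VMIDs are not used). */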
unsigned long kvm_riscv_gstage_vmid_bits(void)
{
	return vmid_bits;
}

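/*
 * Called at VM creation time. The global vmid_version starts at 1 and
 * only increases, so a cached vmid_version of 0 never matches and the
 * first VCPU run always allocates a fresh VMID.
 */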
int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
	/* Mark the initial VMID and VMID version invalid */
	kvm->arch.vmid.vmid_version = 0;
	kvm->arch.vmid.vmid = 0;

	return 0;
}

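/*
 * Returns true when the VM's cached VMID generation no longer matches
 * the global vmid_version, i.e. a new VMID must be allocated. Always
 * false when VMIDs are not used (vmid_bits == 0).
 */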
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
{
	if (!vmid_bits)
		return false;

	return unlikely(READ_ONCE(vmid->vmid_version) !=
			READ_ONCE(vmid_version));
}

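/* IPI callback: flush all G-stage (guest) TLB entries on the local CPU. */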
static void __local_hfence_gvma_all(void *info)
{
	kvm_riscv_local_hfence_gvma_all();
}

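/*
 * Called from the VCPU run loop. If the VM's VMID generation is stale,
 * allocate a new VMID under vmid_lock and request a G-stage page table
 * (HGATP) update on all VCPUs of this VM.
 */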
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_vcpu *v;
	struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;

	if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
		return;

	spin_lock(&vmid_lock);

	/*
	 * Re-check the vmid_version under the lock because another VCPU
	 * may have already allocated a valid VMID for this VM.
	 */
	if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
		spin_unlock(&vmid_lock);
		return;
	}

	/* First user of a new VMID version? */
	if (unlikely(vmid_next == 0)) {
		WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1);
		vmid_next = 1;

		/*
		 * We ran out of VMIDs, so we increment vmid_version and
		 * start assigning VMIDs from 1.
		 *
		 * This also means the existing VMID assignments of all
		 * Guest instances are invalid and we have to force VMID
		 * re-assignment for all Guest instances. Guest instances
		 * that were not running will automatically pick up new
		 * VMIDs because they call kvm_riscv_gstage_vmid_update()
		 * whenever they enter the in-kernel run loop. For Guest
		 * instances that are already running, we force VM exits
		 * on all host CPUs using an IPI and flush all Guest TLBs.
		 */
		on_each_cpu_mask(cpu_online_mask, __local_hfence_gvma_all,
				 NULL, 1);
	}

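	/*
	 * Hand out the next VMID. The mask wraps vmid_next back to zero
	 * once the VMID space is exhausted, so the next allocation starts
	 * a new vmid_version.
	 */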
	vmid->vmid = vmid_next;
	vmid_next++;
	vmid_next &= (1 << vmid_bits) - 1;

	WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));

	spin_unlock(&vmid_lock);

	/* Request G-stage page table update for all VCPUs */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}