// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the Arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

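/*
 * Number of VMID bits provided by the hardware: 8, or 16 when the CPU
 * implements 16-bit VMIDs. Set once from kvm_get_vmid_bits() in
 * kvm_arm_vmid_alloc_init() and read-only thereafter.
 */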
unsigned int __ro_after_init kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

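/*
 * active_vmids tracks the vmid value currently installed on each physical
 * CPU; reserved_vmids holds the value carried over for that CPU across a
 * rollover in flush_context(), so that running VMs keep their hardware
 * VMID in the new generation.
 */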
static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

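/*
 * A 64-bit vmid value carries the hardware VMID in its low
 * kvm_arm_vmid_bits bits and a generation counter in the bits above.
 * With 16-bit VMIDs, for example, 0x3004e encodes generation 3 and
 * hardware VMID 0x4e. VMID_MASK selects the generation bits, and
 * vmid2idx()/idx2vmid() convert between a vmid value and its index in
 * vmid_map.
 */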
#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS		VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
#define idx2vmid(idx)		vmid2idx(idx)

/*
 * As vmid #0 is always reserved, we will never hand out a vmid with
 * index 0, so the value below can never match an allocated vmid and
 * can be treated as invalid. It is used to set active_vmids on vCPU
 * schedule out.
 */
#define VMID_ACTIVE_INVALID		VMID_FIRST_VERSION

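/*
 * True if @vmid was allocated in the current global generation, i.e. its
 * generation bits match vmid_generation and no rollover has occurred
 * since it was handed out.
 */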
#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))

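/*
 * Start a new generation: clear the bitmap, carry the VMIDs that are live
 * on each CPU over into reserved_vmids (and re-mark them as used), then
 * invalidate all guest TLB entries. Called from new_vmid() on rollover,
 * with cpu_vmid_lock held.
 */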
static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_zero(vmid_map, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Unlike the ASID allocator, we expect rollovers to be rare for
	 * VMIDs. Hence, instead of marking the CPU as flush_pending and
	 * issuing a local context invalidation on the next context switch,
	 * we broadcast TLB flush + I-cache invalidation over the inner
	 * shareable domain on rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}

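/*
 * If @vmid is still reserved by any CPU, rewrite every matching
 * reserved_vmids entry to @newvmid (the same hardware VMID tagged with
 * the current generation) and report whether a match was found.
 */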
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}

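/*
 * Allocate a vmid in the current generation, with cpu_vmid_lock held.
 * The fast paths try to re-use the VM's previous hardware VMID, either
 * via the reserved list or by re-claiming its bit in the map; otherwise
 * a free slot is taken from the bitmap, rolling the generation over and
 * flushing if the VMID space is exhausted.
 */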
static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

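	/*
	 * Allocate a free slot, resuming the search from where the last
	 * allocation finished (cur_idx) rather than rescanning the whole
	 * map from the start each time.
	 */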
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* We're out of VMIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* We have more VMIDs than CPUs, so this will always succeed */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

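/*
 * Make sure the VM behind @kvm_vmid owns a vmid that is valid in the
 * current generation before the vCPU enters the guest. Expected to be
 * called with preemption disabled; a rough sketch of a caller (not
 * necessarily the exact code in the run loop):
 *
 *	preempt_disable();
 *	kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
 *	...enter the guest...
 *	preempt_enable();
 */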
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Please refer to the comments in check_and_switch_context() in
	 * arch/arm64/mm/context.c.
	 *
	 * Unlike the ASID allocator, we set active_vmids to
	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid reserving
	 * VMID space needlessly on rollover. Hence the explicit "!= 0"
	 * check here, to stay in sync with a concurrent rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return;

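	/* Slow path: take cpu_vmid_lock and allocate/refresh the VMID. */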
	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid))
		vmid = new_vmid(kvm_vmid);

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}

/*
 * Initialize the VMID allocator
 */
int __init kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}

void __init kvm_arm_vmid_alloc_free(void)
{
	bitmap_free(vmid_map);
}