// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

/* Hypervisor (EL2) stage-1 page-table and the lock serialising updates to it. */
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

/*
 * Lowest address of the private VA range used for "IO"-style mappings;
 * initialised by hyp_create_idmap() and bumped by
 * pkvm_alloc_private_va_range() as ranges are handed out.
 */
static u64 __io_map_base;

/*
 * Map [start, start + size) to @phys in the hyp stage-1 page-table with
 * protection @prot, taking pkvm_pgd_lock around the update.
 *
 * Return: 0 on success, or the error from kvm_pgtable_hyp_map().
 */
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base, addr;
	int ret = 0;

	/* __io_map_base is shared state; serialise against other allocators. */
	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	base = addr + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
	/*
	 * !addr catches ALIGN() wrapping the address space to zero; the
	 * second test keeps the range below the vmemmap, which sits above
	 * the IO region (see hyp_create_idmap()).
	 */
	if (!addr || base > __hyp_vmemmap)
		ret = -ENOMEM;
	else {
		/* Commit the allocation only on success. */
		__io_map_base = base;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

/*
 * Allocate a private VA range large enough for @size bytes starting at
 * @phys (including the sub-page offset) and map it there with @prot.
 * On success, *@haddr is the hyp VA corresponding to @phys.
 *
 * Return: 0 on success or a negative error code. Note that a VA range
 * reserved by pkvm_alloc_private_va_range() is not returned to the
 * allocator if the subsequent mapping fails.
 */
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	/* Cover the full pages spanned by [phys, phys + size). */
	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	/* Preserve the intra-page offset of @phys in the returned VA. */
	*haddr = addr + offset_in_page(phys);
	return err;
}

/*
 * Map the hyp VA range [from, to) page by page onto the physical pages
 * currently backing it (per hyp_virt_to_phys()), with protection @prot.
 * Caller must hold pkvm_pgd_lock.
 *
 * Return: 0 on success, or the first mapping error; pages mapped before
 * the failure are left in place.
 */
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	/* Round the range out to full pages. */
	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	/*
	 * Map one page at a time: the backing physical pages need not be
	 * contiguous, so each page is translated individually.
	 */
	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Locked wrapper around pkvm_create_mappings_locked(); see above for the
 * mapping semantics.
 */
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

/*
 * Map the portion of the hyp vmemmap covering [phys, phys + size) onto the
 * backing pages starting at @back, read/write for the hypervisor.
 */
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	/* Compute the vmemmap VA range that describes this PA range. */
	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

/* Base of the (possibly idmapped) bp-hardening vectors; see hyp_map_vectors(). */
static void *__hyp_bp_vect_base;

/*
 * Point this CPU's hyp vector base (kvm_hyp_vector) at the vector slot
 * selected by @slot:
 *  - DIRECT slots use the stock or Spectre-hardened vectors directly;
 *  - INDIRECT slots go through __hyp_bp_vect_base, set up by
 *    hyp_map_vectors().
 *
 * Return: 0 on success, -EINVAL for an unknown slot.
 */
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	/* Resolve the per-slot entry within the chosen vector page. */
	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

/*
 * Establish __hyp_bp_vect_base. When the system does not need idmapped
 * vectors, the bp-hardening vectors are used at their regular hyp VA.
 * Otherwise they are mapped executable into the private VA range and
 * __hyp_bp_vect_base points at that alias.
 *
 * Return: 0 on success or a negative error code from the mapping.
 */
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

/*
 * Identity-map the hyp idmap text (page-aligned) as executable, and carve
 * up the VA space: derive __io_map_base and __hyp_vmemmap so that neither
 * region can conflict with the idmap (see the layout comment below).
 *
 * @hyp_va_bits: number of VA bits available at EL2.
 *
 * Return: 0 on success, or the error from the idmap mapping.
 */
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	/* The idmap must be mapped at its own physical address (start == phys). */
	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}