// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

/* Map [phys, phys + size) at [start, start + size), under the PGD lock. */
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size: The size of the VA range to reserve.
 * @haddr: The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base, addr;
	int ret = 0;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	base = addr + PAGE_ALIGN(size);
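
	/*
	 * Worked example (illustrative, assuming 4 KiB pages): for
	 * size == 3 * PAGE_SIZE, get_order(size) == 2, so addr is
	 * rounded up to the next 16 KiB boundary, while the cursor
	 * only advances by PAGE_ALIGN(size) == 12 KiB.
	 */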

	/* Are we overflowing on the vmemmap? */
	if (!addr || base > __hyp_vmemmap) {
		ret = -ENOMEM;
	} else {
		__io_map_base = base;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}
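
/*
 * Example use of the function above (a sketch; "dev_pa" is a hypothetical
 * caller-provided physical address, not something defined in this file):
 *
 *	unsigned long va;
 *	int err = __pkvm_create_private_mapping(dev_pa, PAGE_SIZE,
 *						PAGE_HYP_DEVICE, &va);
 *
 * On success, va preserves dev_pa's offset within the newly mapped page.
 */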

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

/*
 * Back the hyp_vmemmap entries covering each hyp_memory region with real
 * pages, taken from the contiguous physical range starting at @back.
 */
int hyp_back_vmemmap(phys_addr_t back)
{
	unsigned long i, start, size, end = 0;
	int ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		start = hyp_memory[i].base;
		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
		/*
		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * of the previous region, so avoid mapping it twice.
		 */
		start = max(start, end);

		end = hyp_memory[i].base + hyp_memory[i].size;
		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
		if (start >= end)
			continue;

		size = end - start;
		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
		if (ret)
			return ret;

		memset(hyp_phys_to_virt(back), 0, size);
		back += size;
	}

	return 0;
}

static void *__hyp_bp_vect_base;

/* Point this CPU's hyp vector at the requested Spectre mitigation slot. */
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
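	/*
	 * Worked example (illustrative): with hyp_va_bits == 48, the code
	 * below keeps only bit 46 of the idmap address and then flips it,
	 * so __io_map_base lands in the quarter whose bit 46 differs from
	 * the idmap's; __hyp_vmemmap then takes the upper half of that
	 * quarter via bit 45.
	 */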
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}