// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;
u64 __io_map_base;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	size = PAGE_ALIGN(size + offset_in_page(phys));
	addr = __io_map_base;
	__io_map_base += size;

	/* Are we overflowing on the vmemmap ? */
	if (__io_map_base > __hyp_vmemmap) {
		__io_map_base -= size;
		addr = (unsigned long)ERR_PTR(-ENOMEM);
		goto out;
	}

	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
	if (err) {
		addr = (unsigned long)ERR_PTR(err);
		goto out;
	}

	addr = addr + offset_in_page(phys);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return addr;
}

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

int hyp_map_vectors(void)
{
	phys_addr_t phys;
	void *bp_base;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
		return 0;

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	bp_base = (void *)__pkvm_create_private_mapping(phys,
							__BP_HARDEN_HYP_VECS_SZ,
							PAGE_HYP_EXEC);
	if (IS_ERR_OR_NULL(bp_base))
		return PTR_ERR(bp_base);

	__hyp_bp_vect_base = bp_base;

	return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
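
/*
 * Illustrative note (not part of the upstream file): a worked example of the
 * address-space split performed in hyp_create_idmap(), assuming
 * hyp_va_bits == 48 purely for the sake of the numbers.
 *
 *   BIT(46) = 0x4000_0000_0000, BIT(45) = 0x2000_0000_0000
 *
 * 'start' is the physical address of the idmap text, which the idmap also
 * maps at VA == PA. Masking with BIT(46) isolates bit 46 of that address and
 * the XOR flips it, so __io_map_base ends up in the quarter of the VA space
 * that does not contain the idmap page:
 *
 *   - bit 46 of start set:   __io_map_base = 0x0
 *                            __hyp_vmemmap = 0x2000_0000_0000
 *   - bit 46 of start clear: __io_map_base = 0x4000_0000_0000
 *                            __hyp_vmemmap = 0x6000_0000_0000
 *
 * Private mappings handed out by __pkvm_create_private_mapping() then grow
 * upward from __io_map_base and fail with -ENOMEM once they would run into
 * __hyp_vmemmap, which is the overflow check earlier in this file.
 */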