/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"
#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/sysreg.h>


#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
 * SYS_* register definitions in asm/sysreg.h to use in KVM
 * calls such as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))

/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */

/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE	UL(0x0c)
#define MAIR_ATTR_NORMAL_WT	UL(0xbb)

#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

#define DEFAULT_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))

#define MPIDR_HWID_BITMASK (0xff00fffffful)

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);

struct ex_regs {
	u64 regs[31];
	u64 sp;
	u64 pc;
	u64 pstate;
};

#define VECTOR_NUM	16

enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
			   (v) == VECTOR_SYNC_CURRENT     || \
			   (v) == VECTOR_SYNC_LOWER_64    || \
			   (v) == VECTOR_SYNC_LOWER_32)

#define ESR_EC_NUM		64
#define ESR_EC_SHIFT		26
#define ESR_EC_MASK		(ESR_EC_NUM - 1)

#define ESR_EC_SVC64		0x15
#define ESR_EC_IABT		0x21
#define ESR_EC_DABT		0x25
#define ESR_EC_HW_BP_CURRENT	0x31
#define ESR_EC_SSTEP_CURRENT	0x33
#define ESR_EC_WP_CURRENT	0x35
#define ESR_EC_BRK_INS		0x3c

/* Access flag */
#define PTE_AF			(1ULL << 10)

/* Access flag update enable/disable */
#define TCR_EL1_HA		(1ULL << 39)

void aarch64_get_supported_page_sizes(uint32_t ipa,
				      bool *ps4k, bool *ps16k, bool *ps64k);

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

typedef void(*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
		int vector, handler_fn handler);
void vm_install_sync_handler(struct kvm_vm *vm,
		int vector, int ec, handler_fn handler);
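/*
 * Illustrative usage sketch (guest_brk_handler is a hypothetical name, not
 * part of this API): the descriptor-table setup and install calls run in the
 * host test code, while the handler itself runs in the guest.  Here BRK
 * exceptions taken by the guest at the current EL are routed to a handler
 * that steps over the 4-byte brk instruction:
 *
 *	static void guest_brk_handler(struct ex_regs *regs)
 *	{
 *		regs->pc += 4;	// skip the brk instruction
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vcpu);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS,
 *				guest_brk_handler);
 */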

uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

#define dma_rmb()	dmb(oshld)

#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})

static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}

#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })

static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}

static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}

/**
 * struct arm_smccc_res - Result from SMC/HVC call
 * @a0-a3 result values from registers 0 to 3
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};

/**
 * smccc_hvc - Invoke a SMCCC function using the hvc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 *
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/**
 * smccc_smc - Invoke a SMCCC function using the smc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 *
 */
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */