/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H

#include "target/arm/cpu-features.h"
#include "mte_user_helper.h"

/*
 * Each hook below is followed by "#define name name"; the generic
 * syscall code tests these defines to see which prctl operations the
 * target implements.
 */

/*
 * PR_SVE_GET_VL: return the current SVE vector length in bytes,
 * or -TARGET_EINVAL if this vcpu does not implement SVE.
 */
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        /* PSTATE.SM is always unset on syscall entry. */
        return sve_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_get_vl do_prctl_sve_get_vl

/*
 * PR_SVE_SET_VL: request the SVE vector length in arg2 (bytes).
 * Returns the vector length actually in effect (which may be smaller
 * than requested, clamped to what the cpu supports), or
 * -TARGET_EINVAL for a cpu without SVE or an out-of-range or
 * non-multiple-of-16 request.
 */
static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        uint32_t vq, old_vq;

        /* PSTATE.SM is always unset on syscall entry. */
        old_vq = sve_vq(env);

        /*
         * Bound the value of arg2, so that we know that it fits into
         * the 4-bit field in ZCR_EL1.  Rely on the hflags rebuild to
         * sort out the length supported by the cpu.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, ARM_MAX_VQ);
        env->vfp.zcr_el[1] = vq - 1;
        arm_rebuild_hflags(env);

        /* Re-read the effective VQ, as clamped by the cpu. */
        vq = sve_vq(env);
        if (vq < old_vq) {
            /* Shrinking: discard the now out-of-range register state. */
            aarch64_sve_narrow_vq(env, vq);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_set_vl do_prctl_sve_set_vl

/*
 * PR_SME_GET_VL: return the current SME streaming vector length in
 * bytes, or -TARGET_EINVAL if this vcpu does not implement SME.
 */
static abi_long do_prctl_sme_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sme, cpu)) {
        return sme_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_get_vl do_prctl_sme_get_vl

/*
 * PR_SME_SET_VL: request the SME streaming vector length in arg2
 * (bytes).  Returns the streaming vector length actually in effect,
 * or -TARGET_EINVAL for a cpu without SME or an out-of-range or
 * non-multiple-of-16 request.
 */
static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * Since SVCR.ZA changed, rebuild hflags; PSTATE.SM was
             * cleared on syscall entry, so the (non-streaming) SVE
             * vector length itself is unaffected by this write.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl

/*
 * PR_PAC_RESET_KEYS: regenerate the pointer-authentication keys
 * selected by the mask in arg2 (0 means all keys) with fresh random
 * data.  Returns 0 on success, -TARGET_EINVAL for a cpu without
 * FEAT_PAuth or an invalid mask, -TARGET_EIO if the random source
 * fails (which the real syscall cannot do).
 */
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
        int ret = 0;
        Error *err = NULL;

        if (arg2 == 0) {
            /* An empty mask means "reset all keys". */
            arg2 = all;
        } else if (arg2 & ~all) {
            return -TARGET_EINVAL;
        }
        if (arg2 & PR_PAC_APIAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apia,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APIBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apib,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apda,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apdb,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APGAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apga,
                                        sizeof(ARMPACKey), &err);
        }
        if (ret != 0) {
            /*
             * Some unknown failure in the crypto.  The best
             * we can do is log it and fail the syscall.
             * The real syscall cannot fail this way.
             */
            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
                          error_get_pretty(err));
            error_free(err);
            return -TARGET_EIO;
        }
        return 0;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_reset_keys do_prctl_reset_keys

/*
 * PR_SET_TAGGED_ADDR_CTRL: enable/disable tagged-address ABI and, when
 * the cpu has MTE, configure the tag-check-fault mode and the set of
 * tags that IRG may generate.  Returns 0, or -TARGET_EINVAL if arg2
 * contains bits outside the valid mask.
 */
static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
{
    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
    ARMCPU *cpu = env_archcpu(env);

    /* The MTE control bits are only accepted when the cpu has MTE. */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        valid_mask |= PR_MTE_TCF_MASK;
        valid_mask |= PR_MTE_TAG_MASK;
    }

    if (arg2 & ~valid_mask) {
        return -TARGET_EINVAL;
    }
    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;

    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* Write the PR_MTE_TCF bits into SCTLR_EL1 (helper in
         * mte_user_helper.h). */
        arm_set_mte_tcf0(env, arg2);

        /*
         * Write PR_MTE_TAG to GCR_EL1[Exclude].
         * Note that the syscall uses an include mask,
         * and hardware uses an exclude mask -- invert.
         */
        env->cp15.gcr_el1 =
            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
        arm_rebuild_hflags(env);
    }
    return 0;
}
#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl

/*
 * PR_GET_TAGGED_ADDR_CTRL: report the current tagged-address /
 * MTE configuration, mirroring do_prctl_set_tagged_addr_ctrl.
 */
static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    abi_long ret = 0;

    if (env->tagged_addr_enable) {
        ret |= PR_TAGGED_ADDR_ENABLE;
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* See do_prctl_set_tagged_addr_ctrl. */
        /* Bits [39:38] of SCTLR_EL1 hold the TCF0 value set above. */
        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
        /* GCR_EL1 holds an exclude mask; invert back to an include mask. */
        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
    }
    return ret;
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl

#endif /* AARCH64_TARGET_PRCTL_H */