/*
 * ARM SVE Load/Store Helpers
 *
 * Copyright (c) 2018-2022 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H
#define TARGET_ARM_SVE_LDST_INTERNAL_H

#include "exec/cpu_ldst.h"

/*
 * Load one element into @vd + @reg_off from @host.
 * The controlling predicate is known to be true.
 */
typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host);

/*
 * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
 * The controlling predicate is known to be true.
 */
typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
                              target_ulong vaddr, uintptr_t retaddr);

/*
 * Generate the above primitives.
 */

#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
{ TYPEM val = HOST(host); *(TYPEE *)(vd + H(reg_off)) = val; }

#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \
static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
{ TYPEM val = *(TYPEE *)(vd + H(reg_off)); HOST(host, val); }

#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \
static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
    intptr_t reg_off, target_ulong addr, uintptr_t ra) \
{ \
    TYPEM val = TLB(env, useronly_clean_ptr(addr), ra); \
    *(TYPEE *)(vd + H(reg_off)) = val; \
}

#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \
static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
    intptr_t reg_off, target_ulong addr, uintptr_t ra) \
{ \
    TYPEM val = *(TYPEE *)(vd + H(reg_off)); \
    TLB(env, useronly_clean_ptr(addr), val, ra); \
}

#define DO_LD_PRIM_1(NAME, H, TE, TM) \
    DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
    DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra)

DO_LD_PRIM_1(ld1bb,  H1,   uint8_t,  uint8_t)
DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t,  int8_t)
DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
DO_LD_PRIM_1(ld1bss, H1_4, uint32_t,  int8_t)
DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t)
DO_LD_PRIM_1(ld1bds, H1_8, uint64_t,  int8_t)

#define DO_ST_PRIM_1(NAME, H, TE, TM) \
    DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \
    DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra)

DO_ST_PRIM_1(bb, H1,   uint8_t,  uint8_t)
DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t)
DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t)
DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t)

#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \
    DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \
    DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \
    DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \
    DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra)

#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \
    DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \
    DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \
    DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \
    DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra)

DO_LD_PRIM_2(hh,  H1_2, uint16_t, uint16_t, lduw)
DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw)
DO_LD_PRIM_2(hss, H1_4, uint32_t,  int16_t, lduw)
DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw)
DO_LD_PRIM_2(hds, H1_8, uint64_t,  int16_t, lduw)

DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw)
DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw)
DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw)

DO_LD_PRIM_2(ss,  H1_4, uint32_t, uint32_t, ldl)
DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl)
DO_LD_PRIM_2(sds, H1_8, uint64_t,  int32_t, ldl)

DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl)
DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl)

DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq)
DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
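/*
 * For illustration only: each DO_*_PRIM_2 invocation above generates four
 * primitives.  For example, DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw)
 * expands in part to
 *
 *   static inline void sve_ld1hh_be_host(void *vd, intptr_t reg_off, void *host)
 *   { uint16_t val = lduw_be_p(host); *(uint16_t *)(vd + H1_2(reg_off)) = val; }
 *
 * together with the little-endian _host twin and the _be/_le _tlb variants
 * built on cpu_lduw_{be,le}_data_ra.
 */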
#undef DO_LD_TLB
#undef DO_ST_TLB
#undef DO_LD_HOST
#undef DO_ST_HOST
#undef DO_LD_PRIM_1
#undef DO_ST_PRIM_1
#undef DO_LD_PRIM_2
#undef DO_ST_PRIM_2

/*
 * Resolve the guest virtual address to info->host and info->flags.
 * If @nofault, return false if the page is invalid, otherwise
 * exit via page fault exception.
 */

typedef struct {
    void *host;
    int flags;
    MemTxAttrs attrs;
    bool tagged;
} SVEHostPage;

bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
                    target_ulong addr, int mem_off, MMUAccessType access_type,
                    int mmu_idx, uintptr_t retaddr);

/*
 * Analyse contiguous data, protected by a governing predicate.
 */

typedef enum {
    FAULT_NO,
    FAULT_FIRST,
    FAULT_ALL,
} SVEContFault;

typedef struct {
    /*
     * First and last element wholly contained within the two pages.
     * mem_off_first[0] and reg_off_first[0] are always set >= 0.
     * reg_off_last[0] may be < 0 if the first element crosses pages.
     * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
     * are set >= 0 only if there are complete elements on a second page.
     *
     * The reg_off_* offsets are relative to the internal vector register.
     * The mem_off_first offset is relative to the memory address; the
     * two offsets are different when a load operation extends, a store
     * operation truncates, or for multi-register operations.
     */
    int16_t mem_off_first[2];
    int16_t reg_off_first[2];
    int16_t reg_off_last[2];

    /*
     * One element that is misaligned and spans both pages,
     * or -1 if there is no such active element.
     */
    int16_t mem_off_split;
    int16_t reg_off_split;

    /*
     * The byte offset at which the entire operation crosses a page boundary.
     * Set >= 0 if and only if the entire operation spans two pages.
     */
    int16_t page_split;

    /* TLB data for the two pages. */
    SVEHostPage page[2];
} SVEContLdSt;

/*
 * Find first active element on each page, and a loose bound for the
 * final element on each page.  Identify any single element that spans
 * the page boundary.  Return true if there are any active elements.
 */
bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
                            intptr_t reg_max, int esz, int msize);

/*
 * Resolve the guest virtual addresses to info->page[].
 * Control the generation of page faults with @fault.  Return false if
 * there is no work to do, which can only happen with @fault == FAULT_NO.
 */
bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
                         CPUARMState *env, target_ulong addr,
                         MMUAccessType access_type, uintptr_t retaddr);

#ifdef CONFIG_USER_ONLY
static inline void
sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
                          target_ulong addr, int esize, int msize,
                          int wp_access, uintptr_t retaddr)
{ }
#else
void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
                               uint64_t *vg, target_ulong addr,
                               int esize, int msize, int wp_access,
                               uintptr_t retaddr);
#endif

void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
                             target_ulong addr, int esize, int msize,
                             uint32_t mtedesc, uintptr_t ra);
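/*
 * Illustrative only: a sketch of how the pieces above are intended to
 * compose for a normal (non-first-fault) contiguous load, loosely modeled
 * on the contiguous helpers in sve_helper.c.  The function name and the
 * constants (a 32-byte vector of 1-byte elements) are hypothetical.
 *
 *   static void example_cont_ld(CPUARMState *env, void *vd, uint64_t *vg,
 *                               target_ulong addr, uintptr_t ra)
 *   {
 *       SVEContLdSt info;
 *
 *       // Find active elements and any element spanning the page boundary.
 *       if (!sve_cont_ldst_elements(&info, addr, vg, 32, MO_8, 1)) {
 *           return;  // no active elements, nothing to do
 *       }
 *       // Resolve both pages; FAULT_ALL faults on any invalid page.
 *       sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
 *       // Watchpoint checks precede the actual access; with MTE enabled,
 *       // sve_cont_ldst_mte_check() would also run here.
 *       sve_cont_ldst_watchpoints(&info, env, vg, addr, 1, 1, BP_MEM_READ, ra);
 *       // ... then copy elements with the sve_ld1*_host/_tlb primitives,
 *       // walking reg_off_first[]/reg_off_last[] and info.page[].host.
 *   }
 */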
#endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */