/* Support for writing ELF notes for ARM architectures
 *
 * Copyright (C) 2015 Red Hat Inc.
 *
 * Author: Andrew Jones <drjones@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
#include "sysemu/dump.h"

/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_regs {
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_regs) != 272);

/* struct elf_prstatus from include/uapi/linux/elfcore.h */
struct aarch64_elf_prstatus {
    char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
    uint32_t pr_pid;
    char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
                      offsetof(struct elf_prstatus, pr_ppid) */
    struct aarch64_user_regs pr_reg;
    uint32_t pr_fpvalid;
    char pad3[4];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_elf_prstatus) != 392);

/* struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h
 *
 * While the vregs member of user_fpsimd_state is of type __uint128_t,
 * QEMU uses an array of uint64_t, where the high half of the 128-bit
 * value is always in the 2n+1'th index. Thus we also break the 128-
 * bit values into two halves in this reproduction of user_fpsimd_state.
 */
struct aarch64_user_vfp_state {
    uint64_t vregs[64];
    uint32_t fpsr;
    uint32_t fpcr;
    char pad[8];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_vfp_state) != 528);

/* struct user_sve_header from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_sve_header {
    uint32_t size;
    uint32_t max_size;
    uint16_t vl;
    uint16_t max_vl;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;

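/* Notes are emitted in the standard ELF layout: an Elf64_Nhdr, the owner
 * name padded to a 4-byte boundary, then the descriptor (the register
 * payload). The union below covers each AArch64 descriptor we write, so
 * one buffer type serves all three note types.
 */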
struct aarch64_note {
    Elf64_Nhdr hdr;
    char name[8]; /* align_up(sizeof("CORE"), 4) */
    union {
        struct aarch64_elf_prstatus prstatus;
        struct aarch64_user_vfp_state vfp;
        struct aarch64_user_sve_header sve;
    };
} QEMU_PACKED;

#define AARCH64_NOTE_HEADER_SIZE offsetof(struct aarch64_note, prstatus)
#define AARCH64_PRSTATUS_NOTE_SIZE \
    (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_elf_prstatus))
#define AARCH64_PRFPREG_NOTE_SIZE \
    (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_user_vfp_state))
#define AARCH64_SVE_NOTE_SIZE(env) \
    (AARCH64_NOTE_HEADER_SIZE + sve_size(env))

static void aarch64_note_init(struct aarch64_note *note, DumpState *s,
                              const char *name, Elf64_Word namesz,
                              Elf64_Word type, Elf64_Word descsz)
{
    memset(note, 0, sizeof(*note));

    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
    note->hdr.n_type = cpu_to_dump32(s, type);

    memcpy(note->name, name, namesz);
}

static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
                                       CPUARMState *env, int cpuid,
                                       DumpState *s)
{
    struct aarch64_note note;
    int ret, i;

    aarch64_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.vfp));

    for (i = 0; i < 32; ++i) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        note.vfp.vregs[2 * i + 0] = cpu_to_dump64(s, q[0]);
        note.vfp.vregs[2 * i + 1] = cpu_to_dump64(s, q[1]);
    }

    if (s->dump_info.d_endian == ELFDATA2MSB) {
        /* For AArch64 we must always swap the vfp.vregs's 2n and 2n+1
         * entries when generating BE notes, because even big endian
         * hosts use 2n+1 for the high half.
         */
        for (i = 0; i < 32; ++i) {
            uint64_t tmp = note.vfp.vregs[2 * i];
            note.vfp.vregs[2 * i] = note.vfp.vregs[2 * i + 1];
            note.vfp.vregs[2 * i + 1] = tmp;
        }
    }

    note.vfp.fpsr = cpu_to_dump32(s, vfp_get_fpsr(env));
    note.vfp.fpcr = cpu_to_dump32(s, vfp_get_fpcr(env));

    ret = f(&note, AARCH64_PRFPREG_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

#ifdef TARGET_AARCH64
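/* The NT_ARM_SVE descriptor mirrors the kernel's ptrace view of SVE state:
 * a user_sve_header, then the 32 Z registers (vq * 16 bytes each, starting
 * on a 16-byte boundary), then the 16 P registers plus FFR (vq * 2 bytes
 * each), then FPSR and FPCR, with the total rounded up to a multiple of
 * 16 bytes. The helpers below compute those offsets for a given vector
 * length, where vq is the length in units of 128 bits.
 */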
static off_t sve_zreg_offset(uint32_t vq, int n)
{
    off_t off = sizeof(struct aarch64_user_sve_header);
    return ROUND_UP(off, 16) + vq * 16 * n;
}

static off_t sve_preg_offset(uint32_t vq, int n)
{
    return sve_zreg_offset(vq, 32) + vq * 16 / 8 * n;
}

static off_t sve_fpsr_offset(uint32_t vq)
{
    off_t off = sve_preg_offset(vq, 17);
    return ROUND_UP(off, 16);
}

static off_t sve_fpcr_offset(uint32_t vq)
{
    return sve_fpsr_offset(vq) + sizeof(uint32_t);
}

static uint32_t sve_current_vq(CPUARMState *env)
{
    return sve_vqm1_for_el(env, arm_current_el(env)) + 1;
}

static size_t sve_size_vq(uint32_t vq)
{
    off_t off = sve_fpcr_offset(vq) + sizeof(uint32_t);
    return ROUND_UP(off, 16);
}

static size_t sve_size(CPUARMState *env)
{
    return sve_size_vq(sve_current_vq(env));
}

static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
                                   CPUARMState *env, int cpuid,
                                   DumpState *s)
{
    struct aarch64_note *note;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t vq = sve_current_vq(env);
    uint64_t tmp[ARM_MAX_VQ * 2], *r;
    uint32_t fpr;
    uint8_t *buf;
    int ret, i;

    note = g_malloc0(AARCH64_SVE_NOTE_SIZE(env));
    buf = (uint8_t *)&note->sve;

    aarch64_note_init(note, s, "LINUX", 6, NT_ARM_SVE, sve_size_vq(vq));

    note->sve.size = cpu_to_dump32(s, sve_size_vq(vq));
    note->sve.max_size = cpu_to_dump32(s, sve_size_vq(cpu->sve_max_vq));
    note->sve.vl = cpu_to_dump16(s, vq * 16);
    note->sve.max_vl = cpu_to_dump16(s, cpu->sve_max_vq * 16);
    note->sve.flags = cpu_to_dump16(s, 1);

    for (i = 0; i < 32; ++i) {
        r = sve_bswap64(tmp, &env->vfp.zregs[i].d[0], vq * 2);
        memcpy(&buf[sve_zreg_offset(vq, i)], r, vq * 16);
    }

    for (i = 0; i < 17; ++i) {
        r = sve_bswap64(tmp, &env->vfp.pregs[i].p[0],
                        DIV_ROUND_UP(vq * 2, 8));
        memcpy(&buf[sve_preg_offset(vq, i)], r, vq * 16 / 8);
    }

    fpr = cpu_to_dump32(s, vfp_get_fpsr(env));
    memcpy(&buf[sve_fpsr_offset(vq)], &fpr, sizeof(uint32_t));

    fpr = cpu_to_dump32(s, vfp_get_fpcr(env));
    memcpy(&buf[sve_fpcr_offset(vq)], &fpr, sizeof(uint32_t));

    ret = f(note, AARCH64_SVE_NOTE_SIZE(env), s);
    g_free(note);

    if (ret < 0) {
        return -1;
    }

    return 0;
}
#endif

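/* Entry point for 64-bit notes, called for each CPU when an ELF64 dump is
 * written. It emits an NT_PRSTATUS note with the general-purpose register
 * state, then NT_PRFPREG for the FP/SIMD registers, and finally an
 * NT_ARM_SVE note when the CPU implements SVE.
 */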
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque)
{
    struct aarch64_note note;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    DumpState *s = opaque;
    uint64_t pstate, sp;
    int ret, i;

    aarch64_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));

    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
    note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);

    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
        pstate = cpsr_read(env);
        sp = 0;
    } else {
        pstate = pstate_read(env);
        sp = env->xregs[31];
    }

    for (i = 0; i < 31; ++i) {
        note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
    }
    note.prstatus.pr_reg.sp = cpu_to_dump64(s, sp);
    note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
    note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);

    ret = f(&note, AARCH64_PRSTATUS_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    ret = aarch64_write_elf64_prfpreg(f, env, cpuid, s);
    if (ret) {
        return ret;
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = aarch64_write_elf64_sve(f, env, cpuid, s);
    }
#endif

    return ret;
}

/* struct pt_regs from arch/arm/include/asm/ptrace.h */
struct arm_user_regs {
    uint32_t regs[17];
    char pad[4];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);

/* struct elf_prstatus from include/uapi/linux/elfcore.h */
struct arm_elf_prstatus {
    char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
    uint32_t pr_pid;
    char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
                      offsetof(struct elf_prstatus, pr_ppid) */
    struct arm_user_regs pr_reg;
    uint32_t pr_fpvalid;
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);

/* struct user_vfp from arch/arm/include/asm/user.h */
struct arm_user_vfp_state {
    uint64_t vregs[32];
    uint32_t fpscr;
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct arm_user_vfp_state) != 260);

struct arm_note {
    Elf32_Nhdr hdr;
    char name[8]; /* align_up(sizeof("LINUX"), 4) */
    union {
        struct arm_elf_prstatus prstatus;
        struct arm_user_vfp_state vfp;
    };
} QEMU_PACKED;

#define ARM_NOTE_HEADER_SIZE offsetof(struct arm_note, prstatus)
#define ARM_PRSTATUS_NOTE_SIZE \
    (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_elf_prstatus))
#define ARM_VFP_NOTE_SIZE \
    (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_user_vfp_state))

static void arm_note_init(struct arm_note *note, DumpState *s,
                          const char *name, Elf32_Word namesz,
                          Elf32_Word type, Elf32_Word descsz)
{
    memset(note, 0, sizeof(*note));

    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
    note->hdr.n_type = cpu_to_dump32(s, type);

    memcpy(note->name, name, namesz);
}

static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env,
                               int cpuid, DumpState *s)
{
    struct arm_note note;
    int ret, i;

    arm_note_init(&note, s, "LINUX", 6, NT_ARM_VFP, sizeof(note.vfp));

    for (i = 0; i < 32; ++i) {
        note.vfp.vregs[i] = cpu_to_dump64(s, *aa32_vfp_dreg(env, i));
    }

    note.vfp.fpscr = cpu_to_dump32(s, vfp_get_fpscr(env));

    ret = f(&note, ARM_VFP_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque)
{
    struct arm_note note;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    DumpState *s = opaque;
    int ret, i;
    bool fpvalid = cpu_isar_feature(aa32_vfp_simd, cpu);

    arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));

    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
    note.prstatus.pr_fpvalid = cpu_to_dump32(s, fpvalid);

    for (i = 0; i < 16; ++i) {
        note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
    }
    note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));

    ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    } else if (fpvalid) {
        return arm_write_elf32_vfp(f, env, cpuid, s);
    }

    return 0;
}

int cpu_get_dump_info(ArchDumpInfo *info,
                      const GuestPhysBlockList *guest_phys_blocks)
{
    ARMCPU *cpu;
    CPUARMState *env;
    GuestPhysBlock *block;
    hwaddr lowest_addr = ULLONG_MAX;

    if (first_cpu == NULL) {
        return -1;
    }

    cpu = ARM_CPU(first_cpu);
    env = &cpu->env;

    /* Take a best guess at the phys_base. If we get it wrong then crash
     * will need '--machdep phys_offset=<phys-offset>' added to its command
     * line, which isn't any worse than assuming we can use zero, but being
     * wrong. This is the same algorithm the crash utility uses when
     * attempting to guess as it loads non-dumpfile formatted files.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        if (block->target_start < lowest_addr) {
            lowest_addr = block->target_start;
        }
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        info->d_machine = EM_AARCH64;
        info->d_class = ELFCLASS64;
        info->page_size = (1 << 16); /* aarch64 max pagesize */
        if (lowest_addr != ULLONG_MAX) {
            info->phys_base = lowest_addr;
        }
    } else {
        info->d_machine = EM_ARM;
        info->d_class = ELFCLASS32;
        info->page_size = (1 << 12);
        if (lowest_addr < UINT_MAX) {
            info->phys_base = lowest_addr;
        }
    }

    /* We assume the relevant endianness is that of EL1; this is right
     * for kernels, but might give the wrong answer if you're trying to
     * dump a hypervisor that happens to be running an opposite-endian
     * kernel.
     */
    info->d_endian = (env->cp15.sctlr_el[1] & SCTLR_EE) != 0
                     ? ELFDATA2MSB : ELFDATA2LSB;

    return 0;
}

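/* The generic dump code uses this to size the notes segment before any
 * notes are written, so the value returned here must match the note sizes
 * actually emitted by arm_cpu_write_elf64_note() and
 * arm_cpu_write_elf32_note() above.
 */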
ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
{
    ARMCPU *cpu = ARM_CPU(first_cpu);
    size_t note_size;

    if (class == ELFCLASS64) {
        note_size = AARCH64_PRSTATUS_NOTE_SIZE;
        note_size += AARCH64_PRFPREG_NOTE_SIZE;
#ifdef TARGET_AARCH64
        if (cpu_isar_feature(aa64_sve, cpu)) {
            note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
        }
#endif
    } else {
        note_size = ARM_PRSTATUS_NOTE_SIZE;
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            note_size += ARM_VFP_NOTE_SIZE;
        }
    }

    return note_size * nr_cpus;
}