/*
 * ARM gdb server stub: AArch64 specific functions.
 *
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "gdbstub/helpers.h"
#include "gdbstub/commands.h"
#include "tcg/mte_helper.h"
#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX)
#include <sys/prctl.h>
#include "mte_user_helper.h"
#endif
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/target_page.h"
#endif

int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (n < 31) {
        /* Core integer register. */
        return gdb_get_reg64(mem_buf, env->xregs[n]);
    }
    switch (n) {
    case 31:
        return gdb_get_reg64(mem_buf, env->xregs[31]);
    case 32:
        return gdb_get_reg64(mem_buf, env->pc);
    case 33:
        return gdb_get_reg32(mem_buf, pstate_read(env));
    }
    /* Unknown register. */
    return 0;
}

int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp;

    tmp = ldq_p(mem_buf);

    if (n < 31) {
        /* Core integer register. */
        env->xregs[n] = tmp;
        return 8;
    }
    switch (n) {
    case 31:
        env->xregs[31] = tmp;
        return 8;
    case 32:
        env->pc = tmp;
        return 8;
    case 33:
        /* CPSR */
        pstate_write(env, tmp);
        return 4;
    }
    /* Unknown register. */
    return 0;
}

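/*
 * FPU register view used by the two hooks below: gdb register numbers
 * 0..31 map to the 128-bit V registers, 32 is FPSR and 33 is FPCR.
 */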
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);

            /*
             * On the wire these are target-endian 128 bit values.
             * In the CPU state these are host-order uint64_t values
             * with the least-significant one first. This means they're
             * the other way around for target_big_endian() (which is
             * only true for us for aarch64_be-linux-user).
             */
            if (target_big_endian()) {
                q[1] = ldq_p(buf);
                q[0] = ldq_p(buf + 8);
            } else {
                q[0] = ldq_p(buf);
                q[1] = ldq_p(buf + 8);
            }

            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

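/*
 * SVE register view (see arm_gen_dynamic_svereg_feature() below for the
 * generated description): gdb register numbers 0..31 are the Z registers,
 * 32/33 are FPSR/FPCR, 34..49 are the predicate registers, 50 is FFR and
 * 51 is the read-only vector-length pseudo-register "vg".
 */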
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq += 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something outside our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}

int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            if (target_big_endian()) {
                env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf);
                buf += 8;
                env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf);
            } else {
                env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf);
                buf += 8;
                env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf);
            }
            buf += 8;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq += 4) {
            env->vfp.pregs[preg].p[vq / 4] = ldq_p(buf);
            buf += 8;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something outside our range */
        break;
    }

    return 0;
}

int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0: /* pauth_dmask */
    case 1: /* pauth_cmask */
    case 2: /* pauth_dmask_high */
    case 3: /* pauth_cmask_high */
        /*
         * Note that older versions of this feature only contained
         * pauth_{d,c}mask, for use with Linux user processes, and
         * thus exclusively in the low half of the address space.
         *
         * To support system mode, and to debug kernels, two new regs
         * were added to cover the high half of the address space.
         * For the purpose of pauth_ptr_mask, we can use any well-formed
         * address within the address space half -- here, 0 and -1.
         */
        {
            bool is_data = !(reg & 1);
            bool is_high = reg & 2;
            ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
            ARMVAParameters param;

            param = aa64_va_parameters(env, -is_high, mmu_idx, is_data, false);
            return gdb_get_reg64(buf, pauth_ptr_mask(param));
        }
    default:
        return 0;
    }
}

int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg)
{
    /* All pseudo registers are read-only. */
    return 0;
}

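/*
 * Emit the union type used to describe a Z register to gdb. Roughly (not
 * verbatim), for "svev" this produces one <vector> per lane type, e.g.
 *   <vector id="svevqu" type="uint128" count="VQ"/>
 * then one <union id="svevn{b,h,s,d,q}"> per lane size grouping the
 * signed/unsigned/float views of that size, and finally a top-level
 * <union id="svev"> combining the per-size unions.
 */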
static void output_vector_union_type(GDBFeatureBuilder *builder, int reg_width,
                                     const char *name)
{
    struct TypeSize {
        const char *gdb_type;
        short size;
        char sz, suffix;
    };

    static const struct TypeSize vec_lanes[] = {
        /* quads */
        { "uint128", 128, 'q', 'u' },
        { "int128", 128, 'q', 's' },
        /* 64 bit */
        { "ieee_double", 64, 'd', 'f' },
        { "uint64", 64, 'd', 'u' },
        { "int64", 64, 'd', 's' },
        /* 32 bit */
        { "ieee_single", 32, 's', 'f' },
        { "uint32", 32, 's', 'u' },
        { "int32", 32, 's', 's' },
        /* 16 bit */
        { "ieee_half", 16, 'h', 'f' },
        { "uint16", 16, 'h', 'u' },
        { "int16", 16, 'h', 's' },
        /* bytes */
        { "uint8", 8, 'b', 'u' },
        { "int8", 8, 'b', 's' },
    };

    static const char suf[] = { 'b', 'h', 's', 'd', 'q' };
    int i, j;

    /* First define types and totals in a whole VL */
    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
        gdb_feature_builder_append_tag(
            builder, "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
            name, vec_lanes[i].sz, vec_lanes[i].suffix,
            vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
    }

    /*
     * Now define a union for each size group containing unsigned and
     * signed and potentially float versions of each size from 128 to
     * 8 bits.
     */
    for (i = 0; i < ARRAY_SIZE(suf); i++) {
        int bits = 8 << i;

        gdb_feature_builder_append_tag(builder, "<union id=\"%sn%c\">",
                                       name, suf[i]);
        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
            if (vec_lanes[j].size == bits) {
                gdb_feature_builder_append_tag(
                    builder, "<field name=\"%c\" type=\"%s%c%c\"/>",
                    vec_lanes[j].suffix, name,
                    vec_lanes[j].sz, vec_lanes[j].suffix);
            }
        }
        gdb_feature_builder_append_tag(builder, "</union>");
    }

    /* And now the final union of unions */
    gdb_feature_builder_append_tag(builder, "<union id=\"%s\">", name);
    for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
        gdb_feature_builder_append_tag(builder,
                                       "<field name=\"%c\" type=\"%sn%c\"/>",
                                       suf[i], name, suf[i]);
    }
    gdb_feature_builder_append_tag(builder, "</union>");
}

GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int reg_width = cpu->sve_max_vq * 128;
    int pred_width = cpu->sve_max_vq * 16;
    GDBFeatureBuilder builder;
    char *name;
    int reg = 0;
    int i;

    gdb_feature_builder_init(&builder, &cpu->dyn_svereg_feature.desc,
                             "org.gnu.gdb.aarch64.sve", "sve-registers.xml",
                             base_reg);

    /* Create the vector union type. */
    output_vector_union_type(&builder, reg_width, "svev");

    /* Create the predicate vector type. */
    gdb_feature_builder_append_tag(
        &builder, "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
        pred_width / 8);

    /* Define the vector registers. */
    for (i = 0; i < 32; i++) {
        name = g_strdup_printf("z%d", i);
        gdb_feature_builder_append_reg(&builder, name, reg_width, reg++,
                                       "svev", NULL);
    }

    /* FPSR and FPCR status registers */
    gdb_feature_builder_append_reg(&builder, "fpsr", 32, reg++,
                                   "int", "float");
    gdb_feature_builder_append_reg(&builder, "fpcr", 32, reg++,
                                   "int", "float");

    /* Define the predicate registers. */
    for (i = 0; i < 16; i++) {
        name = g_strdup_printf("p%d", i);
        gdb_feature_builder_append_reg(&builder, name, pred_width, reg++,
                                       "svep", NULL);
    }
    gdb_feature_builder_append_reg(&builder, "ffr", pred_width, reg++,
                                   "svep", "vector");

    /* Define the vector length pseudo-register. */
    gdb_feature_builder_append_reg(&builder, "vg", 64, reg++, "int", NULL);

    gdb_feature_builder_end(&builder);

    return &cpu->dyn_svereg_feature.desc;
}

#ifdef CONFIG_USER_ONLY
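/*
 * MTE 'tag_ctl' pseudo-register (user mode only). Reading it returns the
 * current SCTLR_EL1.TCF0 tag-check-fault mode; writing it is the gdbstub
 * equivalent of a prctl(PR_SET_TAGGED_ADDR_CTRL) call with one of the
 * PR_MTE_TCF_* modes.
 */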
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tcf0;

    assert(reg == 0);

    tcf0 = extract64(env->cp15.sctlr_el[1], 38, 2);

    return gdb_get_reg64(buf, tcf0);
}

int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg)
{
#if defined(CONFIG_LINUX)
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    uint8_t tcf;

    assert(reg == 0);

    tcf = *buf << PR_MTE_TCF_SHIFT;

    if (!tcf) {
        return 0;
    }

    /*
     * 'tag_ctl' register is actually a "pseudo-register" provided by GDB to
     * expose options regarding the type of MTE fault that can be controlled
     * at runtime.
     */
    arm_set_mte_tcf0(env, tcf);

    return 1;
#else
    return 0;
#endif
}
#endif /* CONFIG_USER_ONLY */

#ifdef CONFIG_TCG
static void handle_q_memtag(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;
    uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
    int type = gdb_get_cmd_param(params, 2)->val_ul;

    uint8_t *tags;
    uint8_t addr_tag;

    g_autoptr(GString) str_buf = g_string_new(NULL);

    /*
     * GDB does not query multiple tags for a memory range on remote targets,
     * so that is not supported by the gdbstub either.
     */
    if (len != 1) {
        gdb_put_packet("E02");
        return;
    }

    /* GDB never queries a tag different from an allocation tag (type 1). */
    if (type != 1) {
        gdb_put_packet("E03");
        return;
    }

    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    /* Note that tags are packed here (2 tags packed in one byte). */
    tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
                                    MMU_DATA_LOAD, true, 0);
    if (!tags) {
        /* Address is not in a tagged region. */
        gdb_put_packet("E04");
        return;
    }

    /* Unpack tag from byte. */
    addr_tag = load_tag1(addr, tags);
    g_string_printf(str_buf, "m%.2x", addr_tag);

    gdb_put_packet(str_buf->str);
}

static void handle_q_isaddresstagged(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;

    uint8_t *tags;
    const char *reply;

    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
                                    MMU_DATA_LOAD, true, 0);
    reply = tags ? "01" : "00";

    gdb_put_packet(reply);
}

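/*
 * Handle the 'QMemTags:<start addr>,<length>:<type>:<tags>' packet, i.e. a
 * request from GDB to write allocation tags for a range of memory. The
 * read side ('qMemTags') is handled by handle_q_memtag() above.
 */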
static void handle_Q_memtag(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t start_addr = gdb_get_cmd_param(params, 0)->val_ull;
    uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
    int type = gdb_get_cmd_param(params, 2)->val_ul;
    char const *new_tags_str = gdb_get_cmd_param(params, 3)->data;

    uint64_t end_addr;

    int num_new_tags;
    uint8_t *tags;

    g_autoptr(GByteArray) new_tags = g_byte_array_new();

    /*
     * Only the allocation tag (i.e. type 1) can be set at the stub side.
     */
    if (type != 1) {
        gdb_put_packet("E02");
        return;
    }

    end_addr = start_addr + (len - 1); /* 'len' is always >= 1 */
    /* Check if request's memory range does not cross page boundaries. */
    if ((start_addr ^ end_addr) & TARGET_PAGE_MASK) {
        gdb_put_packet("E03");
        return;
    }

    /*
     * Get all tags in the page starting from the tag of the start address.
     * Note that there are two tags packed into a single byte here.
     */
    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    tags = allocation_tag_mem_probe(env, mmu_index, start_addr, MMU_DATA_STORE,
                                    1, MMU_DATA_STORE, true, 0);
    if (!tags) {
        /* Address is not in a tagged region. */
        gdb_put_packet("E04");
        return;
    }

    /* Convert tags provided by GDB, 2 hex digits per tag. */
    num_new_tags = strlen(new_tags_str) / 2;
    gdb_hextomem(new_tags, new_tags_str, num_new_tags);

    uint64_t address = start_addr;
    int new_tag_index = 0;
    while (address <= end_addr) {
        uint8_t new_tag;
        int packed_index;

        /*
         * Find packed tag index from unpacked tag index. There are two tags
         * in one packed index (one tag per nibble).
         */
        packed_index = new_tag_index / 2;

        new_tag = new_tags->data[new_tag_index % num_new_tags];
        store_tag1(address, tags + packed_index, new_tag);

        address += TAG_GRANULE;
        new_tag_index++;
    }

    gdb_put_packet("OK");
}

enum Command {
    qMemTags,
    qIsAddressTagged,
    QMemTags,
    NUM_CMDS
};

static const GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = {
    [qMemTags] = {
        .handler = handle_q_memtag,
        .cmd_startswith = true,
        .cmd = "MemTags:",
        .schema = "L,l:l0",
        .need_cpu_context = true
    },
    [qIsAddressTagged] = {
        .handler = handle_q_isaddresstagged,
        .cmd_startswith = true,
        .cmd = "IsAddressTagged:",
        .schema = "L0",
        .need_cpu_context = true
    },
    [QMemTags] = {
        .handler = handle_Q_memtag,
        .cmd_startswith = true,
        .cmd = "MemTags:",
        .schema = "L,l:l:s0",
        .need_cpu_context = true
    },
};
#endif /* CONFIG_TCG */

void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *qsupported,
                                       GPtrArray *qtable, GPtrArray *stable)
{
    /* MTE */
#ifdef CONFIG_TCG
    if (cpu_isar_feature(aa64_mte, cpu)) {
        g_string_append(qsupported, ";memory-tagging+");

        g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qMemTags]);
        g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qIsAddressTagged]);
        g_ptr_array_add(stable, (gpointer) &cmd_handler_table[QMemTags]);
    }
#endif
}