1 /* 2 * ARM gdb server stub: AArch64 specific functions. 3 * 4 * Copyright (c) 2013 SUSE LINUX Products GmbH 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/log.h" 21 #include "cpu.h" 22 #include "internals.h" 23 #include "gdbstub/helpers.h" 24 #include "gdbstub/commands.h" 25 #include "tcg/mte_helper.h" 26 #if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX) 27 #include <sys/prctl.h> 28 #include "mte_user_helper.h" 29 #endif 30 31 int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) 32 { 33 ARMCPU *cpu = ARM_CPU(cs); 34 CPUARMState *env = &cpu->env; 35 36 if (n < 31) { 37 /* Core integer register. */ 38 return gdb_get_reg64(mem_buf, env->xregs[n]); 39 } 40 switch (n) { 41 case 31: 42 return gdb_get_reg64(mem_buf, env->xregs[31]); 43 case 32: 44 return gdb_get_reg64(mem_buf, env->pc); 45 case 33: 46 return gdb_get_reg32(mem_buf, pstate_read(env)); 47 } 48 /* Unknown register. */ 49 return 0; 50 } 51 52 int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) 53 { 54 ARMCPU *cpu = ARM_CPU(cs); 55 CPUARMState *env = &cpu->env; 56 uint64_t tmp; 57 58 tmp = ldq_p(mem_buf); 59 60 if (n < 31) { 61 /* Core integer register. */ 62 env->xregs[n] = tmp; 63 return 8; 64 } 65 switch (n) { 66 case 31: 67 env->xregs[31] = tmp; 68 return 8; 69 case 32: 70 env->pc = tmp; 71 return 8; 72 case 33: 73 /* CPSR */ 74 pstate_write(env, tmp); 75 return 4; 76 } 77 /* Unknown register. */ 78 return 0; 79 } 80 81 int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg) 82 { 83 ARMCPU *cpu = ARM_CPU(cs); 84 CPUARMState *env = &cpu->env; 85 86 switch (reg) { 87 case 0 ... 31: 88 { 89 /* 128 bit FP register - quads are in LE order */ 90 uint64_t *q = aa64_vfp_qreg(env, reg); 91 return gdb_get_reg128(buf, q[1], q[0]); 92 } 93 case 32: 94 /* FPSR */ 95 return gdb_get_reg32(buf, vfp_get_fpsr(env)); 96 case 33: 97 /* FPCR */ 98 return gdb_get_reg32(buf, vfp_get_fpcr(env)); 99 default: 100 return 0; 101 } 102 } 103 104 int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg) 105 { 106 ARMCPU *cpu = ARM_CPU(cs); 107 CPUARMState *env = &cpu->env; 108 109 switch (reg) { 110 case 0 ... 31: 111 /* 128 bit FP register */ 112 { 113 uint64_t *q = aa64_vfp_qreg(env, reg); 114 q[0] = ldq_le_p(buf); 115 q[1] = ldq_le_p(buf + 8); 116 return 16; 117 } 118 case 32: 119 /* FPSR */ 120 vfp_set_fpsr(env, ldl_p(buf)); 121 return 4; 122 case 33: 123 /* FPCR */ 124 vfp_set_fpcr(env, ldl_p(buf)); 125 return 4; 126 default: 127 return 0; 128 } 129 } 130 131 int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg) 132 { 133 ARMCPU *cpu = ARM_CPU(cs); 134 CPUARMState *env = &cpu->env; 135 136 switch (reg) { 137 /* The first 32 registers are the zregs */ 138 case 0 ... 31: 139 { 140 int vq, len = 0; 141 for (vq = 0; vq < cpu->sve_max_vq; vq++) { 142 len += gdb_get_reg128(buf, 143 env->vfp.zregs[reg].d[vq * 2 + 1], 144 env->vfp.zregs[reg].d[vq * 2]); 145 } 146 return len; 147 } 148 case 32: 149 return gdb_get_reg32(buf, vfp_get_fpsr(env)); 150 case 33: 151 return gdb_get_reg32(buf, vfp_get_fpcr(env)); 152 /* then 16 predicates and the ffr */ 153 case 34 ... 50: 154 { 155 int preg = reg - 34; 156 int vq, len = 0; 157 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { 158 len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]); 159 } 160 return len; 161 } 162 case 51: 163 { 164 /* 165 * We report in Vector Granules (VG) which is 64bit in a Z reg 166 * while the ZCR works in Vector Quads (VQ) which is 128bit chunks. 167 */ 168 int vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1; 169 return gdb_get_reg64(buf, vq * 2); 170 } 171 default: 172 /* gdbstub asked for something out our range */ 173 qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); 174 break; 175 } 176 177 return 0; 178 } 179 180 int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) 181 { 182 ARMCPU *cpu = ARM_CPU(cs); 183 CPUARMState *env = &cpu->env; 184 185 /* The first 32 registers are the zregs */ 186 switch (reg) { 187 /* The first 32 registers are the zregs */ 188 case 0 ... 31: 189 { 190 int vq, len = 0; 191 uint64_t *p = (uint64_t *) buf; 192 for (vq = 0; vq < cpu->sve_max_vq; vq++) { 193 env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; 194 env->vfp.zregs[reg].d[vq * 2] = *p++; 195 len += 16; 196 } 197 return len; 198 } 199 case 32: 200 vfp_set_fpsr(env, *(uint32_t *)buf); 201 return 4; 202 case 33: 203 vfp_set_fpcr(env, *(uint32_t *)buf); 204 return 4; 205 case 34 ... 50: 206 { 207 int preg = reg - 34; 208 int vq, len = 0; 209 uint64_t *p = (uint64_t *) buf; 210 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { 211 env->vfp.pregs[preg].p[vq / 4] = *p++; 212 len += 8; 213 } 214 return len; 215 } 216 case 51: 217 /* cannot set vg via gdbstub */ 218 return 0; 219 default: 220 /* gdbstub asked for something out our range */ 221 break; 222 } 223 224 return 0; 225 } 226 227 int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg) 228 { 229 ARMCPU *cpu = ARM_CPU(cs); 230 CPUARMState *env = &cpu->env; 231 232 switch (reg) { 233 case 0: /* pauth_dmask */ 234 case 1: /* pauth_cmask */ 235 case 2: /* pauth_dmask_high */ 236 case 3: /* pauth_cmask_high */ 237 /* 238 * Note that older versions of this feature only contained 239 * pauth_{d,c}mask, for use with Linux user processes, and 240 * thus exclusively in the low half of the address space. 241 * 242 * To support system mode, and to debug kernels, two new regs 243 * were added to cover the high half of the address space. 244 * For the purpose of pauth_ptr_mask, we can use any well-formed 245 * address within the address space half -- here, 0 and -1. 246 */ 247 { 248 bool is_data = !(reg & 1); 249 bool is_high = reg & 2; 250 ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); 251 ARMVAParameters param; 252 253 param = aa64_va_parameters(env, -is_high, mmu_idx, is_data, false); 254 return gdb_get_reg64(buf, pauth_ptr_mask(param)); 255 } 256 default: 257 return 0; 258 } 259 } 260 261 int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg) 262 { 263 /* All pseudo registers are read-only. */ 264 return 0; 265 } 266 267 static void output_vector_union_type(GDBFeatureBuilder *builder, int reg_width, 268 const char *name) 269 { 270 struct TypeSize { 271 const char *gdb_type; 272 short size; 273 char sz, suffix; 274 }; 275 276 static const struct TypeSize vec_lanes[] = { 277 /* quads */ 278 { "uint128", 128, 'q', 'u' }, 279 { "int128", 128, 'q', 's' }, 280 /* 64 bit */ 281 { "ieee_double", 64, 'd', 'f' }, 282 { "uint64", 64, 'd', 'u' }, 283 { "int64", 64, 'd', 's' }, 284 /* 32 bit */ 285 { "ieee_single", 32, 's', 'f' }, 286 { "uint32", 32, 's', 'u' }, 287 { "int32", 32, 's', 's' }, 288 /* 16 bit */ 289 { "ieee_half", 16, 'h', 'f' }, 290 { "uint16", 16, 'h', 'u' }, 291 { "int16", 16, 'h', 's' }, 292 /* bytes */ 293 { "uint8", 8, 'b', 'u' }, 294 { "int8", 8, 'b', 's' }, 295 }; 296 297 static const char suf[] = { 'b', 'h', 's', 'd', 'q' }; 298 int i, j; 299 300 /* First define types and totals in a whole VL */ 301 for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { 302 gdb_feature_builder_append_tag( 303 builder, "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>", 304 name, vec_lanes[i].sz, vec_lanes[i].suffix, 305 vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size); 306 } 307 308 /* 309 * Now define a union for each size group containing unsigned and 310 * signed and potentially float versions of each size from 128 to 311 * 8 bits. 312 */ 313 for (i = 0; i < ARRAY_SIZE(suf); i++) { 314 int bits = 8 << i; 315 316 gdb_feature_builder_append_tag(builder, "<union id=\"%sn%c\">", 317 name, suf[i]); 318 for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) { 319 if (vec_lanes[j].size == bits) { 320 gdb_feature_builder_append_tag( 321 builder, "<field name=\"%c\" type=\"%s%c%c\"/>", 322 vec_lanes[j].suffix, name, 323 vec_lanes[j].sz, vec_lanes[j].suffix); 324 } 325 } 326 gdb_feature_builder_append_tag(builder, "</union>"); 327 } 328 329 /* And now the final union of unions */ 330 gdb_feature_builder_append_tag(builder, "<union id=\"%s\">", name); 331 for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) { 332 gdb_feature_builder_append_tag(builder, 333 "<field name=\"%c\" type=\"%sn%c\"/>", 334 suf[i], name, suf[i]); 335 } 336 gdb_feature_builder_append_tag(builder, "</union>"); 337 } 338 339 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg) 340 { 341 ARMCPU *cpu = ARM_CPU(cs); 342 int reg_width = cpu->sve_max_vq * 128; 343 int pred_width = cpu->sve_max_vq * 16; 344 GDBFeatureBuilder builder; 345 char *name; 346 int reg = 0; 347 int i; 348 349 gdb_feature_builder_init(&builder, &cpu->dyn_svereg_feature.desc, 350 "org.gnu.gdb.aarch64.sve", "sve-registers.xml", 351 base_reg); 352 353 /* Create the vector union type. */ 354 output_vector_union_type(&builder, reg_width, "svev"); 355 356 /* Create the predicate vector type. */ 357 gdb_feature_builder_append_tag( 358 &builder, "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>", 359 pred_width / 8); 360 361 /* Define the vector registers. */ 362 for (i = 0; i < 32; i++) { 363 name = g_strdup_printf("z%d", i); 364 gdb_feature_builder_append_reg(&builder, name, reg_width, reg++, 365 "svev", NULL); 366 } 367 368 /* fpscr & status registers */ 369 gdb_feature_builder_append_reg(&builder, "fpsr", 32, reg++, 370 "int", "float"); 371 gdb_feature_builder_append_reg(&builder, "fpcr", 32, reg++, 372 "int", "float"); 373 374 /* Define the predicate registers. */ 375 for (i = 0; i < 16; i++) { 376 name = g_strdup_printf("p%d", i); 377 gdb_feature_builder_append_reg(&builder, name, pred_width, reg++, 378 "svep", NULL); 379 } 380 gdb_feature_builder_append_reg(&builder, "ffr", pred_width, reg++, 381 "svep", "vector"); 382 383 /* Define the vector length pseudo-register. */ 384 gdb_feature_builder_append_reg(&builder, "vg", 64, reg++, "int", NULL); 385 386 gdb_feature_builder_end(&builder); 387 388 return &cpu->dyn_svereg_feature.desc; 389 } 390 391 #ifdef CONFIG_USER_ONLY 392 int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg) 393 { 394 ARMCPU *cpu = ARM_CPU(cs); 395 CPUARMState *env = &cpu->env; 396 uint64_t tcf0; 397 398 assert(reg == 0); 399 400 tcf0 = extract64(env->cp15.sctlr_el[1], 38, 2); 401 402 return gdb_get_reg64(buf, tcf0); 403 } 404 405 int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg) 406 { 407 #if defined(CONFIG_LINUX) 408 ARMCPU *cpu = ARM_CPU(cs); 409 CPUARMState *env = &cpu->env; 410 411 uint8_t tcf; 412 413 assert(reg == 0); 414 415 tcf = *buf << PR_MTE_TCF_SHIFT; 416 417 if (!tcf) { 418 return 0; 419 } 420 421 /* 422 * 'tag_ctl' register is actually a "pseudo-register" provided by GDB to 423 * expose options regarding the type of MTE fault that can be controlled at 424 * runtime. 425 */ 426 arm_set_mte_tcf0(env, tcf); 427 428 return 1; 429 #else 430 return 0; 431 #endif 432 } 433 434 static void handle_q_memtag(GArray *params, void *user_ctx) 435 { 436 ARMCPU *cpu = ARM_CPU(user_ctx); 437 CPUARMState *env = &cpu->env; 438 439 uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull; 440 uint64_t len = gdb_get_cmd_param(params, 1)->val_ul; 441 int type = gdb_get_cmd_param(params, 2)->val_ul; 442 443 uint8_t *tags; 444 uint8_t addr_tag; 445 446 g_autoptr(GString) str_buf = g_string_new(NULL); 447 448 /* 449 * GDB does not query multiple tags for a memory range on remote targets, so 450 * that's not supported either by gdbstub. 451 */ 452 if (len != 1) { 453 gdb_put_packet("E02"); 454 } 455 456 /* GDB never queries a tag different from an allocation tag (type 1). */ 457 if (type != 1) { 458 gdb_put_packet("E03"); 459 } 460 461 /* Note that tags are packed here (2 tags packed in one byte). */ 462 tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */, 463 MMU_DATA_LOAD, true, 0); 464 if (!tags) { 465 /* Address is not in a tagged region. */ 466 gdb_put_packet("E04"); 467 return; 468 } 469 470 /* Unpack tag from byte. */ 471 addr_tag = load_tag1(addr, tags); 472 g_string_printf(str_buf, "m%.2x", addr_tag); 473 474 gdb_put_packet(str_buf->str); 475 } 476 477 static void handle_q_isaddresstagged(GArray *params, void *user_ctx) 478 { 479 ARMCPU *cpu = ARM_CPU(user_ctx); 480 CPUARMState *env = &cpu->env; 481 482 uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull; 483 484 uint8_t *tags; 485 const char *reply; 486 487 tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */, 488 MMU_DATA_LOAD, true, 0); 489 reply = tags ? "01" : "00"; 490 491 gdb_put_packet(reply); 492 } 493 494 static void handle_Q_memtag(GArray *params, void *user_ctx) 495 { 496 ARMCPU *cpu = ARM_CPU(user_ctx); 497 CPUARMState *env = &cpu->env; 498 499 uint64_t start_addr = gdb_get_cmd_param(params, 0)->val_ull; 500 uint64_t len = gdb_get_cmd_param(params, 1)->val_ul; 501 int type = gdb_get_cmd_param(params, 2)->val_ul; 502 char const *new_tags_str = gdb_get_cmd_param(params, 3)->data; 503 504 uint64_t end_addr; 505 506 int num_new_tags; 507 uint8_t *tags; 508 509 g_autoptr(GByteArray) new_tags = g_byte_array_new(); 510 511 /* 512 * Only the allocation tag (i.e. type 1) can be set at the stub side. 513 */ 514 if (type != 1) { 515 gdb_put_packet("E02"); 516 return; 517 } 518 519 end_addr = start_addr + (len - 1); /* 'len' is always >= 1 */ 520 /* Check if request's memory range does not cross page boundaries. */ 521 if ((start_addr ^ end_addr) & TARGET_PAGE_MASK) { 522 gdb_put_packet("E03"); 523 return; 524 } 525 526 /* 527 * Get all tags in the page starting from the tag of the start address. 528 * Note that there are two tags packed into a single byte here. 529 */ 530 tags = allocation_tag_mem_probe(env, 0, start_addr, MMU_DATA_STORE, 531 8 /* 64-bit */, MMU_DATA_STORE, true, 0); 532 if (!tags) { 533 /* Address is not in a tagged region. */ 534 gdb_put_packet("E04"); 535 return; 536 } 537 538 /* Convert tags provided by GDB, 2 hex digits per tag. */ 539 num_new_tags = strlen(new_tags_str) / 2; 540 gdb_hextomem(new_tags, new_tags_str, num_new_tags); 541 542 uint64_t address = start_addr; 543 int new_tag_index = 0; 544 while (address <= end_addr) { 545 uint8_t new_tag; 546 int packed_index; 547 548 /* 549 * Find packed tag index from unpacked tag index. There are two tags 550 * in one packed index (one tag per nibble). 551 */ 552 packed_index = new_tag_index / 2; 553 554 new_tag = new_tags->data[new_tag_index % num_new_tags]; 555 store_tag1(address, tags + packed_index, new_tag); 556 557 address += TAG_GRANULE; 558 new_tag_index++; 559 } 560 561 gdb_put_packet("OK"); 562 } 563 564 enum Command { 565 qMemTags, 566 qIsAddressTagged, 567 QMemTags, 568 NUM_CMDS 569 }; 570 571 static const GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = { 572 [qMemTags] = { 573 .handler = handle_q_memtag, 574 .cmd_startswith = true, 575 .cmd = "MemTags:", 576 .schema = "L,l:l0", 577 .need_cpu_context = true 578 }, 579 [qIsAddressTagged] = { 580 .handler = handle_q_isaddresstagged, 581 .cmd_startswith = true, 582 .cmd = "IsAddressTagged:", 583 .schema = "L0", 584 .need_cpu_context = true 585 }, 586 [QMemTags] = { 587 .handler = handle_Q_memtag, 588 .cmd_startswith = true, 589 .cmd = "MemTags:", 590 .schema = "L,l:l:s0", 591 .need_cpu_context = true 592 }, 593 }; 594 #endif /* CONFIG_USER_ONLY */ 595 596 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *qsupported, 597 GPtrArray *qtable, GPtrArray *stable) 598 { 599 #ifdef CONFIG_USER_ONLY 600 /* MTE */ 601 if (cpu_isar_feature(aa64_mte, cpu)) { 602 g_string_append(qsupported, ";memory-tagging+"); 603 604 g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qMemTags]); 605 g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qIsAddressTagged]); 606 g_ptr_array_add(stable, (gpointer) &cmd_handler_table[QMemTags]); 607 } 608 #endif 609 } 610