/*
 * gdb server stub - softmmu specific bits
 *
 * Debug integration depends on support from the individual
 * accelerators so most of this involves calling the ops helpers.
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2022 Linaro Ltd
 *
 * SPDX-License-Identifier: LGPL-2.0+
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "exec/gdbstub.h"
#include "gdbstub/syscalls.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "sysemu/cpus.h"
#include "sysemu/runstate.h"
#include "sysemu/replay.h"
#include "hw/core/cpu.h"
#include "hw/cpu/cluster.h"
#include "hw/boards.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "internals.h"

/* System emulation specific state */
typedef struct {
    CharBackend chr;
    Chardev *mon_chr;
} GDBSystemState;

GDBSystemState gdbserver_system_state;

static void reset_gdbserver_state(void)
{
    g_free(gdbserver_state.processes);
    gdbserver_state.processes = NULL;
    gdbserver_state.process_num = 0;
    gdbserver_state.allow_stop_reply = false;
}

/*
 * Return the GDB index for a given vCPU state.
 *
 * In system mode GDB numbers CPUs from 1, as 0 is reserved as the
 * "any cpu" index.
 */
int gdb_get_cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

/*
 * We check the status of the last message in the chardev receive code
 */
bool gdb_got_immediate_ack(void)
{
    return true;
}

/*
 * GDB Connection management. For system emulation we do all of this
 * via our existing Chardev infrastructure which allows us to support
 * network and unix sockets.
 */

void gdb_put_buffer(const uint8_t *buf, int len)
{
    /*
     * XXX this blocks the entire thread. Rewrite to use
     * qemu_chr_fe_write and background I/O callbacks
     */
    qemu_chr_fe_write_all(&gdbserver_system_state.chr, buf, len);
}

static void gdb_chr_event(void *opaque, QEMUChrEvent event)
{
    int i;
    GDBState *s = (GDBState *) opaque;

    switch (event) {
    case CHR_EVENT_OPENED:
        /* Start with the first process attached, others detached */
        for (i = 0; i < s->process_num; i++) {
            s->processes[i].attached = !i;
        }

        s->c_cpu = gdb_first_attached_cpu();
        s->g_cpu = s->c_cpu;

        vm_stop(RUN_STATE_PAUSED);
        replay_gdb_attached();
        break;
    default:
        break;
    }
}

/*
 * In system-mode we stop the VM and wait to send the syscall packet
 * until notification that the CPU has stopped. This must be done
 * because if the packet is sent now the reply from the syscall
 * request could be received while the CPU is still in the running
 * state, which can cause packets to be dropped and state transition
 * 'T' packets to be sent while the syscall is still being processed.
 */
void gdb_syscall_handling(const char *syscall_packet)
{
    vm_stop(RUN_STATE_DEBUG);
    qemu_cpu_kick(gdbserver_state.c_cpu);
}

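/*
 * VM change state handler, registered in gdbserver_start(). When the
 * VM stops (and a stop reply is allowed) this maps the new RunState
 * onto a GDB signal, or a watchpoint report, and sends the
 * corresponding "T" stop-reply packet to the debugger.
 */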
static void gdb_vm_state_change(void *opaque, bool running, RunState state)
{
    CPUState *cpu = gdbserver_state.c_cpu;
    g_autoptr(GString) buf = g_string_new(NULL);
    g_autoptr(GString) tid = g_string_new(NULL);
    const char *type;
    int ret;

    if (running || gdbserver_state.state == RS_INACTIVE) {
        return;
    }

    /* Is there a GDB syscall waiting to be sent?  */
    if (gdb_handled_syscall()) {
        return;
    }

    if (cpu == NULL) {
        /* No process attached */
        return;
    }

    if (!gdbserver_state.allow_stop_reply) {
        return;
    }

    gdb_append_thread_id(cpu, tid);

    switch (state) {
    case RUN_STATE_DEBUG:
        if (cpu->watchpoint_hit) {
            switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
            case BP_MEM_READ:
                type = "r";
                break;
            case BP_MEM_ACCESS:
                type = "a";
                break;
            default:
                type = "";
                break;
            }
            trace_gdbstub_hit_watchpoint(type,
                                         gdb_get_cpu_index(cpu),
                                         cpu->watchpoint_hit->vaddr);
            g_string_printf(buf, "T%02xthread:%s;%swatch:%" VADDR_PRIx ";",
                            GDB_SIGNAL_TRAP, tid->str, type,
                            cpu->watchpoint_hit->vaddr);
            cpu->watchpoint_hit = NULL;
            goto send_packet;
        } else {
            trace_gdbstub_hit_break();
        }
        tb_flush(cpu);
        ret = GDB_SIGNAL_TRAP;
        break;
    case RUN_STATE_PAUSED:
        trace_gdbstub_hit_paused();
        ret = GDB_SIGNAL_INT;
        break;
    case RUN_STATE_SHUTDOWN:
        trace_gdbstub_hit_shutdown();
        ret = GDB_SIGNAL_QUIT;
        break;
    case RUN_STATE_IO_ERROR:
        trace_gdbstub_hit_io_error();
        ret = GDB_SIGNAL_STOP;
        break;
    case RUN_STATE_WATCHDOG:
        trace_gdbstub_hit_watchdog();
        ret = GDB_SIGNAL_ALRM;
        break;
    case RUN_STATE_INTERNAL_ERROR:
        trace_gdbstub_hit_internal_error();
        ret = GDB_SIGNAL_ABRT;
        break;
    case RUN_STATE_SAVE_VM:
    case RUN_STATE_RESTORE_VM:
        return;
    case RUN_STATE_FINISH_MIGRATE:
        ret = GDB_SIGNAL_XCPU;
        break;
    default:
        trace_gdbstub_hit_unknown(state);
        ret = GDB_SIGNAL_UNKNOWN;
        break;
    }
    gdb_set_stop_cpu(cpu);
    g_string_printf(buf, "T%02xthread:%s;", ret, tid->str);

send_packet:
    gdb_put_packet(buf->str);
    gdbserver_state.allow_stop_reply = false;

    /* disable single step if it was enabled */
    cpu_single_step(cpu, 0);
}

#ifndef _WIN32
static void gdb_sigterm_handler(int signal)
{
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_PAUSED);
    }
}
#endif

static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len)
{
    g_autoptr(GString) hex_buf = g_string_new("O");
    gdb_memtohex(hex_buf, buf, len);
    gdb_put_packet(hex_buf->str);
    return len;
}

static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend,
                             bool *be_opened, Error **errp)
{
    *be_opened = false;
}

static void char_gdb_class_init(ObjectClass *oc, void *data)
{
    ChardevClass *cc = CHARDEV_CLASS(oc);

    cc->internal = true;
    cc->open = gdb_monitor_open;
    cc->chr_write = gdb_monitor_write;
}

#define TYPE_CHARDEV_GDB "chardev-gdb"

static const TypeInfo char_gdb_type_info = {
    .name = TYPE_CHARDEV_GDB,
    .parent = TYPE_CHARDEV,
    .class_init = char_gdb_class_init,
};

static int gdb_chr_can_receive(void *opaque)
{
    /*
     * We can handle an arbitrarily large amount of data.
     * Pick the maximum packet size, which is as good as anything.
     */
    return MAX_PACKET_LENGTH;
}

static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
{
    int i;

    for (i = 0; i < size; i++) {
        gdb_read_byte(buf[i]);
    }
}

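/*
 * Walk the QOM composition tree looking for TYPE_CPU_CLUSTER devices:
 * each cluster found is presented to GDB as a separate process with
 * PID cluster_id + 1. create_processes() below sorts the resulting
 * table by PID and then appends the default process.
 */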
263 */ 264 return MAX_PACKET_LENGTH; 265 } 266 267 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size) 268 { 269 int i; 270 271 for (i = 0; i < size; i++) { 272 gdb_read_byte(buf[i]); 273 } 274 } 275 276 static int find_cpu_clusters(Object *child, void *opaque) 277 { 278 if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) { 279 GDBState *s = (GDBState *) opaque; 280 CPUClusterState *cluster = CPU_CLUSTER(child); 281 GDBProcess *process; 282 283 s->processes = g_renew(GDBProcess, s->processes, ++s->process_num); 284 285 process = &s->processes[s->process_num - 1]; 286 287 /* 288 * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at 289 * runtime, we enforce here that the machine does not use a cluster ID 290 * that would lead to PID 0. 291 */ 292 assert(cluster->cluster_id != UINT32_MAX); 293 process->pid = cluster->cluster_id + 1; 294 process->attached = false; 295 process->target_xml = NULL; 296 297 return 0; 298 } 299 300 return object_child_foreach(child, find_cpu_clusters, opaque); 301 } 302 303 static int pid_order(const void *a, const void *b) 304 { 305 GDBProcess *pa = (GDBProcess *) a; 306 GDBProcess *pb = (GDBProcess *) b; 307 308 if (pa->pid < pb->pid) { 309 return -1; 310 } else if (pa->pid > pb->pid) { 311 return 1; 312 } else { 313 return 0; 314 } 315 } 316 317 static void create_processes(GDBState *s) 318 { 319 object_child_foreach(object_get_root(), find_cpu_clusters, s); 320 321 if (gdbserver_state.processes) { 322 /* Sort by PID */ 323 qsort(gdbserver_state.processes, 324 gdbserver_state.process_num, 325 sizeof(gdbserver_state.processes[0]), 326 pid_order); 327 } 328 329 gdb_create_default_process(s); 330 } 331 332 int gdbserver_start(const char *device) 333 { 334 Chardev *chr = NULL; 335 Chardev *mon_chr; 336 g_autoptr(GString) cs = g_string_new(device); 337 338 if (!first_cpu) { 339 error_report("gdbstub: meaningless to attach gdb to a " 340 "machine without any CPU."); 341 return -1; 342 } 343 344 if (!gdb_supports_guest_debug()) { 345 error_report("gdbstub: current accelerator doesn't " 346 "support guest debugging"); 347 return -1; 348 } 349 350 if (cs->len == 0) { 351 return -1; 352 } 353 354 trace_gdbstub_op_start(cs->str); 355 356 if (g_strcmp0(cs->str, "none") != 0) { 357 if (g_str_has_prefix(cs->str, "tcp:")) { 358 /* enforce required TCP attributes */ 359 g_string_append_printf(cs, ",wait=off,nodelay=on,server=on"); 360 } 361 #ifndef _WIN32 362 else if (strcmp(device, "stdio") == 0) { 363 struct sigaction act; 364 365 memset(&act, 0, sizeof(act)); 366 act.sa_handler = gdb_sigterm_handler; 367 sigaction(SIGINT, &act, NULL); 368 } 369 #endif 370 /* 371 * FIXME: it's a bit weird to allow using a mux chardev here 372 * and implicitly setup a monitor. We may want to break this. 
373 */ 374 chr = qemu_chr_new_noreplay("gdb", cs->str, true, NULL); 375 if (!chr) { 376 return -1; 377 } 378 } 379 380 if (!gdbserver_state.init) { 381 gdb_init_gdbserver_state(); 382 383 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL); 384 385 /* Initialize a monitor terminal for gdb */ 386 mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB, 387 NULL, NULL, &error_abort); 388 monitor_init_hmp(mon_chr, false, &error_abort); 389 } else { 390 qemu_chr_fe_deinit(&gdbserver_system_state.chr, true); 391 mon_chr = gdbserver_system_state.mon_chr; 392 reset_gdbserver_state(); 393 } 394 395 create_processes(&gdbserver_state); 396 397 if (chr) { 398 qemu_chr_fe_init(&gdbserver_system_state.chr, chr, &error_abort); 399 qemu_chr_fe_set_handlers(&gdbserver_system_state.chr, 400 gdb_chr_can_receive, 401 gdb_chr_receive, gdb_chr_event, 402 NULL, &gdbserver_state, NULL, true); 403 } 404 gdbserver_state.state = chr ? RS_IDLE : RS_INACTIVE; 405 gdbserver_system_state.mon_chr = mon_chr; 406 gdb_syscall_reset(); 407 408 return 0; 409 } 410 411 static void register_types(void) 412 { 413 type_register_static(&char_gdb_type_info); 414 } 415 416 type_init(register_types); 417 418 /* Tell the remote gdb that the process has exited. */ 419 void gdb_exit(int code) 420 { 421 char buf[4]; 422 423 if (!gdbserver_state.init) { 424 return; 425 } 426 427 trace_gdbstub_op_exiting((uint8_t)code); 428 429 if (gdbserver_state.allow_stop_reply) { 430 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code); 431 gdb_put_packet(buf); 432 gdbserver_state.allow_stop_reply = false; 433 } 434 435 qemu_chr_fe_deinit(&gdbserver_system_state.chr, true); 436 } 437 438 void gdb_qemu_exit(int code) 439 { 440 qemu_system_shutdown_request_with_code(SHUTDOWN_CAUSE_GUEST_SHUTDOWN, 441 code); 442 } 443 444 /* 445 * Memory access 446 */ 447 static int phy_memory_mode; 448 449 int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, 450 uint8_t *buf, int len, bool is_write) 451 { 452 CPUClass *cc; 453 454 if (phy_memory_mode) { 455 if (is_write) { 456 cpu_physical_memory_write(addr, buf, len); 457 } else { 458 cpu_physical_memory_read(addr, buf, len); 459 } 460 return 0; 461 } 462 463 cc = CPU_GET_CLASS(cpu); 464 if (cc->memory_rw_debug) { 465 return cc->memory_rw_debug(cpu, addr, buf, len, is_write); 466 } 467 468 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); 469 } 470 471 /* 472 * cpu helpers 473 */ 474 475 unsigned int gdb_get_max_cpus(void) 476 { 477 MachineState *ms = MACHINE(qdev_get_machine()); 478 return ms->smp.max_cpus; 479 } 480 481 bool gdb_can_reverse(void) 482 { 483 return replay_mode == REPLAY_MODE_PLAY; 484 } 485 486 /* 487 * Softmmu specific command helpers 488 */ 489 490 void gdb_handle_query_qemu_phy_mem_mode(GArray *params, 491 void *user_ctx) 492 { 493 g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode); 494 gdb_put_strbuf(); 495 } 496 497 void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx) 498 { 499 if (!params->len) { 500 gdb_put_packet("E22"); 501 return; 502 } 503 504 if (!get_param(params, 0)->val_ul) { 505 phy_memory_mode = 0; 506 } else { 507 phy_memory_mode = 1; 508 } 509 gdb_put_packet("OK"); 510 } 511 512 void gdb_handle_query_rcmd(GArray *params, void *user_ctx) 513 { 514 const guint8 zero = 0; 515 int len; 516 517 if (!params->len) { 518 gdb_put_packet("E22"); 519 return; 520 } 521 522 len = strlen(get_param(params, 0)->data); 523 if (len % 2) { 524 gdb_put_packet("E01"); 525 return; 526 } 527 528 g_assert(gdbserver_state.mem_buf->len == 0); 529 len = len / 2; 530 
void gdb_handle_query_rcmd(GArray *params, void *user_ctx)
{
    const guint8 zero = 0;
    int len;

    if (!params->len) {
        gdb_put_packet("E22");
        return;
    }

    len = strlen(get_param(params, 0)->data);
    if (len % 2) {
        gdb_put_packet("E01");
        return;
    }

    g_assert(gdbserver_state.mem_buf->len == 0);
    len = len / 2;
    gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
    g_byte_array_append(gdbserver_state.mem_buf, &zero, 1);
    qemu_chr_be_write(gdbserver_system_state.mon_chr,
                      gdbserver_state.mem_buf->data,
                      gdbserver_state.mem_buf->len);
    gdb_put_packet("OK");
}

/*
 * Execution state helpers
 */

void gdb_handle_query_attached(GArray *params, void *user_ctx)
{
    gdb_put_packet("1");
}

void gdb_continue(void)
{
    if (!runstate_needs_reset()) {
        trace_gdbstub_op_continue();
        vm_start();
    }
}

/*
 * Resume execution, per CPU actions.
 */
int gdb_continue_partial(char *newstates)
{
    CPUState *cpu;
    int res = 0;
    int flag = 0;

    if (!runstate_needs_reset()) {
        bool step_requested = false;
        CPU_FOREACH(cpu) {
            if (newstates[cpu->cpu_index] == 's') {
                step_requested = true;
                break;
            }
        }

        if (vm_prepare_start(step_requested)) {
            return 0;
        }

        CPU_FOREACH(cpu) {
            switch (newstates[cpu->cpu_index]) {
            case 0:
            case 1:
                break; /* nothing to do here */
            case 's':
                trace_gdbstub_op_stepping(cpu->cpu_index);
                cpu_single_step(cpu, gdbserver_state.sstep_flags);
                cpu_resume(cpu);
                flag = 1;
                break;
            case 'c':
                trace_gdbstub_op_continue_cpu(cpu->cpu_index);
                cpu_resume(cpu);
                flag = 1;
                break;
            default:
                res = -1;
                break;
            }
        }
    }
    if (flag) {
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    }
    return res;
}

/*
 * Signal Handling - in system mode we only need SIGINT and SIGTRAP; other
 * signals are not yet supported.
 */

enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

int gdb_signal_to_target(int sig)
{
    switch (sig) {
    case 2:
        return TARGET_SIGINT;
    case 5:
        return TARGET_SIGTRAP;
    default:
        return -1;
    }
}

/*
 * Break/Watch point helpers
 */

bool gdb_supports_guest_debug(void)
{
    const AccelOpsClass *ops = cpus_get_accel();
    if (ops->supports_guest_debug) {
        return ops->supports_guest_debug();
    }
    return false;
}

int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len)
{
    const AccelOpsClass *ops = cpus_get_accel();
    if (ops->insert_breakpoint) {
        return ops->insert_breakpoint(cs, type, addr, len);
    }
    return -ENOSYS;
}

int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len)
{
    const AccelOpsClass *ops = cpus_get_accel();
    if (ops->remove_breakpoint) {
        return ops->remove_breakpoint(cs, type, addr, len);
    }
    return -ENOSYS;
}

void gdb_breakpoint_remove_all(CPUState *cs)
{
    const AccelOpsClass *ops = cpus_get_accel();
    if (ops->remove_all_breakpoints) {
        ops->remove_all_breakpoints(cs);
    }
}