/*
 * Lockstep Execution Plugin
 *
 * Allows you to execute two QEMU instances in lockstep and report
 * when their execution diverges. This is mainly useful for developers
 * who want to see where a change to TCG code generation has
 * introduced a subtle and hard to find bug.
 *
 * Caveats:
 *  - single-threaded linux-user apps only (even then, non-deterministic
 *    syscalls can cause spurious divergence)
 *  - no MTTCG-enabled system emulation (icount may help)
 *
 * While icount makes things more deterministic it doesn't mean a
 * particular run will execute the exact same sequence of blocks. An
 * asynchronous event (for example an X11 graphics update) may cause a
 * block to end early and a new partial block to start. This means
 * serial-only test cases are a better bet. -d nochain may also help,
 * as may -accel tcg,one-insn-per-tb=on.
 *
 * This code is not thread safe!
 *
 * Copyright (c) 2020 Linaro Ltd
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
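
/*
 * Example invocation (a sketch only: the QEMU binaries, plugin path and
 * guest program below are placeholders for whatever pair of instances
 * you are comparing, e.g. a known-good build against a patched one):
 *
 *   ./qemu-x86_64.good -plugin ./liblockstep.so,sockpath=lockstep.sock \
 *       ./testcase &
 *   ./qemu-x86_64.patched -plugin ./liblockstep.so,sockpath=lockstep.sock \
 *       ./testcase
 *
 * Whichever instance starts first creates the socket and waits in
 * accept(); the second finds the path already exists and connects to it,
 * so launch order only determines which end listens.
 */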

#include <glib.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <stdio.h>
#include <errno.h>

#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* saved so we can uninstall later */
static qemu_plugin_id_t our_id;

static unsigned long bb_count;
static unsigned long insn_count;

/* Information about a translated block */
typedef struct {
    uint64_t pc;
    uint64_t insns;
} BlockInfo;

/* Information about an execution state in the log */
typedef struct {
    BlockInfo *block;
    unsigned long insn_count;
    unsigned long block_count;
} ExecInfo;

/* The execution state we compare */
typedef struct {
    uint64_t pc;
    uint64_t insn_count;
} ExecState;

typedef struct {
    GSList *log_pos;
    int distance;
} DivergeState;

/* list of translated block info */
static GSList *blocks;

/* execution log and points of divergence */
static GSList *log, *divergence_log;

static int socket_fd;
static char *path_to_unlink;

static bool verbose;

static void plugin_cleanup(qemu_plugin_id_t id)
{
    /* Free our block data */
    g_slist_free_full(blocks, &g_free);
    g_slist_free_full(log, &g_free);
    g_slist_free(divergence_log);

    close(socket_fd);
    if (path_to_unlink) {
        unlink(path_to_unlink);
    }
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    g_autoptr(GString) out = g_string_new("No divergence :-)\n");
    g_string_append_printf(out, "Executed %ld/%d blocks\n",
                           bb_count, g_slist_length(log));
    g_string_append_printf(out, "Executed ~%ld instructions\n", insn_count);
    qemu_plugin_outs(out->str);

    plugin_cleanup(id);
}

static void report_divergance(ExecState *us, ExecState *them)
{
    DivergeState divrec = { log, 0 };
    g_autoptr(GString) out = g_string_new("");
    bool diverged = false;

    /*
     * If we have diverged before did we get back on track or are we
     * totally losing it?
     */
    if (divergence_log) {
        DivergeState *last = (DivergeState *) divergence_log->data;
        GSList *entry;

        for (entry = log; g_slist_next(entry); entry = g_slist_next(entry)) {
            if (entry == last->log_pos) {
                break;
            }
            divrec.distance++;
        }

        /*
         * If the last two records are so close it is likely we will
         * not recover synchronisation with the other end.
         */
        if (divrec.distance == 1 && last->distance == 1) {
            diverged = true;
        }
    }
    divergence_log = g_slist_prepend(divergence_log,
                                     g_memdup2(&divrec, sizeof(divrec)));

    /* Output short log entry of going out of sync... */
    if (verbose || divrec.distance == 1 || diverged) {
        g_string_printf(out, "@ "
                        "0x%016" PRIx64 " (%" PRId64 ") vs "
                        "0x%016" PRIx64 " (%" PRId64 ")"
                        " (%d/%d since last)\n",
                        us->pc, us->insn_count,
                        them->pc, them->insn_count,
                        g_slist_length(divergence_log),
                        divrec.distance);
        qemu_plugin_outs(out->str);
    }

    if (diverged) {
        int i;
        GSList *entry;

        g_string_printf(out, "Δ too high, we have diverged, previous insns\n");

        for (entry = log, i = 0;
             g_slist_next(entry) && i < 5;
             entry = g_slist_next(entry), i++) {
            ExecInfo *prev = (ExecInfo *) entry->data;
            g_string_append_printf(out,
                                   "  previously @ 0x%016" PRIx64 "/%" PRId64
                                   " (%ld insns)\n",
                                   prev->block->pc, prev->block->insns,
                                   prev->insn_count);
        }
        qemu_plugin_outs(out->str);
        qemu_plugin_outs("giving up\n");
        qemu_plugin_uninstall(our_id, plugin_cleanup);
    }
}

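/*
 * Each executed translation block does one blocking write of our
 * ExecState followed by one blocking read of the peer's, which is what
 * keeps the two instances advancing in lockstep. The struct is sent as
 * raw bytes, so -- an assumption of this scheme rather than anything
 * checked here -- both instances must agree on the ExecState layout and
 * host endianness.
 */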
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
    BlockInfo *bi = (BlockInfo *) udata;
    ExecState us, them;
    ssize_t bytes;
    ExecInfo *exec;

    us.pc = bi->pc;
    us.insn_count = insn_count;

    /*
     * Write our current position to the other end. If we fail the
     * other end has probably died and we should shut down gracefully.
     */
    bytes = write(socket_fd, &us, sizeof(ExecState));
    if (bytes < (ssize_t) sizeof(ExecState)) {
        qemu_plugin_outs(bytes < 0 ?
                         "problem writing to socket" :
                         "wrote less than expected to socket");
        qemu_plugin_uninstall(our_id, plugin_cleanup);
        return;
    }

    /*
     * Now read where our peer has reached. Again a failure probably
     * indicates the other end died and we should close down cleanly.
     */
    bytes = read(socket_fd, &them, sizeof(ExecState));
    if (bytes < (ssize_t) sizeof(ExecState)) {
        qemu_plugin_outs(bytes < 0 ?
                         "problem reading from socket" :
                         "read less than expected");
        qemu_plugin_uninstall(our_id, plugin_cleanup);
        return;
    }

    /*
     * Compare and report if we have diverged.
     */
    if (us.pc != them.pc) {
        report_divergance(&us, &them);
    }

    /*
     * Assume this block will execute fully and record it
     * in the execution log.
     */
    insn_count += bi->insns;
    bb_count++;
    exec = g_new0(ExecInfo, 1);
    exec->block = bi;
    exec->insn_count = insn_count;
    exec->block_count = bb_count;
    log = g_slist_prepend(log, exec);
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    BlockInfo *bi = g_new0(BlockInfo, 1);
    bi->pc = qemu_plugin_tb_vaddr(tb);
    bi->insns = qemu_plugin_tb_n_insns(tb);

    /* save a reference so we can free later */
    blocks = g_slist_prepend(blocks, bi);
    qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                         QEMU_PLUGIN_CB_NO_REGS, (void *)bi);
}


/*
 * Instead of encoding master/slave status into what are essentially
 * two peers, we just take the simple approach of checking for the
 * existence of the socket path and assuming that if it's not there we
 * are the first process.
 */
static bool setup_socket(const char *path)
{
    struct sockaddr_un sockaddr;
    const gsize pathlen = sizeof(sockaddr.sun_path) - 1;
    int fd;

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("create socket");
        return false;
    }

    sockaddr.sun_family = AF_UNIX;
    if (g_strlcpy(sockaddr.sun_path, path, pathlen) >= pathlen) {
        perror("bad path");
        close(fd);
        return false;
    }

    if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
        perror("bind socket");
        close(fd);
        return false;
    }

    /* remember to clean-up */
    path_to_unlink = g_strdup(path);

    if (listen(fd, 1) < 0) {
        perror("listen socket");
        close(fd);
        return false;
    }

    socket_fd = accept(fd, NULL, NULL);
    if (socket_fd < 0 && errno != EINTR) {
        perror("accept socket");
        close(fd);
        return false;
    }

    qemu_plugin_outs("setup_socket::ready\n");

    close(fd);
    return true;
}

static bool connect_socket(const char *path)
{
    int fd;
    struct sockaddr_un sockaddr;
    const gsize pathlen = sizeof(sockaddr.sun_path) - 1;

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("create socket");
        return false;
    }

    sockaddr.sun_family = AF_UNIX;
    if (g_strlcpy(sockaddr.sun_path, path, pathlen) >= pathlen) {
        perror("bad path");
        close(fd);
        return false;
    }

    if (connect(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
        perror("failed to connect");
        close(fd);
        return false;
    }

    qemu_plugin_outs("connect_socket::ready\n");

    socket_fd = fd;
    return true;
}

static bool setup_unix_socket(const char *path)
{
    if (g_file_test(path, G_FILE_TEST_EXISTS)) {
        return connect_socket(path);
    } else {
        return setup_socket(path);
    }
}


QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    int i;
    g_autofree char *sock_path = NULL;

    for (i = 0; i < argc; i++) {
        char *p = argv[i];
        g_auto(GStrv) tokens = g_strsplit(p, "=", 2);

        if (g_strcmp0(tokens[0], "verbose") == 0) {
            if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &verbose)) {
                fprintf(stderr, "boolean argument parsing failed: %s\n", p);
                return -1;
            }
        } else if (g_strcmp0(tokens[0], "sockpath") == 0) {
            sock_path = g_strdup(tokens[1]);
        } else {
            fprintf(stderr, "option parsing failed: %s\n", p);
            return -1;
        }
    }

    if (sock_path == NULL) {
        fprintf(stderr, "Need a socket path to talk to other instance.\n");
        return -1;
    }

    if (!setup_unix_socket(sock_path)) {
        fprintf(stderr, "Failed to setup socket for communications.\n");
        return -1;
    }

    our_id = id;

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}
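
/*
 * For reference, qemu_plugin_install() above only understands the
 * "sockpath" (required) and "verbose" (optional boolean) arguments, so
 * a full option string looks something like (illustrative only):
 *
 *   -plugin ./liblockstep.so,sockpath=/tmp/lockstep.sock,verbose=on
 *
 * Output goes through qemu_plugin_outs(), so you will normally also
 * want -d plugin on the QEMU command line to see it in the log.
 */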