1 /*
2 * Lockstep Execution Plugin
3 *
4 * Allows you to execute two QEMU instances in lockstep and report
5 * when their execution diverges. This is mainly useful for developers
6 * who want to see where a change to TCG code generation has
7 * introduced a subtle and hard to find bug.
8 *
9 * Caveats:
10 * - single-threaded linux-user apps only with non-deterministic syscalls
11 * - no MTTCG enabled system emulation (icount may help)
12 *
13 * While icount makes things more deterministic it doesn't mean a
14 * particular run may execute the exact same sequence of blocks. An
15 * asynchronous event (for example X11 graphics update) may cause a
16 * block to end early and a new partial block to start. This means
17 * serial only test cases are a better bet. -d nochain may also help.
18 *
19 * This code is not thread safe!
20 *
21 * Copyright (c) 2020 Linaro Ltd
22 *
23 * SPDX-License-Identifier: GPL-2.0-or-later
24 */
25
26 #include <glib.h>
27 #include <inttypes.h>
28 #include <unistd.h>
29 #include <sys/socket.h>
30 #include <sys/un.h>
31 #include <stdio.h>
32 #include <errno.h>
33
34 #include <qemu-plugin.h>
35
/* Declare the plugin API version we were built against */
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* saved so we can uninstall later */
static qemu_plugin_id_t our_id;

/* running totals, updated once per executed translation block */
static unsigned long bb_count;
static unsigned long insn_count;
43
/* Information about a translated block */
typedef struct {
    uint64_t pc;     /* guest virtual address of the block's first insn */
    uint64_t insns;  /* number of instructions in the block */
} BlockInfo;

/* Information about an execution state in the log */
typedef struct {
    BlockInfo *block;           /* block that executed (owned by `blocks`) */
    unsigned long insn_count;   /* cumulative insn count after this block */
    unsigned long block_count;  /* cumulative block count after this block */
} ExecInfo;

/* The execution state we compare (written verbatim over the socket) */
typedef struct {
    uint64_t pc;               /* pc of the block about to execute */
    unsigned long insn_count;  /* instructions executed so far */
} ExecState;

/* A recorded point of divergence between the two instances */
typedef struct {
    GSList *log_pos;  /* head of the execution log when we diverged */
    int distance;     /* log entries since the previous divergence */
} DivergeState;
67
/* list of translated block info (owns every BlockInfo allocation) */
static GSList *blocks;

/* execution log and points of divergence */
static GSList *log, *divergence_log;

/* connected stream socket to the peer instance */
static int socket_fd;
/* socket path we bound, unlinked at cleanup; NULL on the connecting side */
static char *path_to_unlink;

/* when set, report every divergence entry rather than significant ones */
static bool verbose;
78
/*
 * Release everything we allocated and shut down the peer socket.
 * Used both as the uninstall callback and from plugin_exit.
 */
static void plugin_cleanup(qemu_plugin_id_t id)
{
    /* Drop all bookkeeping built up while running. */
    g_slist_free_full(blocks, g_free);
    g_slist_free_full(log, g_free);
    g_slist_free(divergence_log);

    close(socket_fd);

    /* Only the instance that created the socket file removes it. */
    if (path_to_unlink != NULL) {
        unlink(path_to_unlink);
    }
}
91
/*
 * atexit callback: emit the final summary then clean up.
 *
 * Fix: bb_count/insn_count are unsigned long so they must be printed
 * with %lu (not %ld), and g_slist_length() returns a guint which needs
 * %u (not %d) — mismatched format specifiers are undefined behaviour.
 */
static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    g_autoptr(GString) out = g_string_new("No divergence :-)\n");
    g_string_append_printf(out, "Executed %lu/%u blocks\n",
                           bb_count, g_slist_length(log));
    g_string_append_printf(out, "Executed ~%lu instructions\n", insn_count);
    qemu_plugin_outs(out->str);

    plugin_cleanup(id);
}
102
/*
 * Called when the PC we reported and the PC the peer reported differ.
 *
 * We record the divergence point and how far (in log entries) it lies
 * from the previous one. Two divergences only one entry apart mean we
 * are unlikely to re-synchronise, so we dump recent history and
 * uninstall the plugin.
 *
 * Fixes: format specifiers corrected to match argument types —
 * g_slist_length() returns guint (%u), the insn counts are unsigned
 * long (%lu) and BlockInfo.insns is uint64_t (PRIu64). Mismatches are
 * undefined behaviour.
 */
static void report_divergance(ExecState *us, ExecState *them)
{
    DivergeState divrec = { log, 0 };
    g_autoptr(GString) out = g_string_new("");
    bool diverged = false;

    /*
     * If we have diverged before did we get back on track or are we
     * totally losing it?
     */
    if (divergence_log) {
        DivergeState *last = (DivergeState *) divergence_log->data;
        GSList *entry;

        /* Count how many log entries were added since the last divergence. */
        for (entry = log; g_slist_next(entry); entry = g_slist_next(entry)) {
            if (entry == last->log_pos) {
                break;
            }
            divrec.distance++;
        }

        /*
         * If the last two records are so close it is likely we will
         * not recover synchronisation with the other end.
         */
        if (divrec.distance == 1 && last->distance == 1) {
            diverged = true;
        }
    }
    divergence_log = g_slist_prepend(divergence_log,
                                     g_memdup2(&divrec, sizeof(divrec)));

    /* Output short log entry of going out of sync... */
    if (verbose || divrec.distance == 1 || diverged) {
        g_string_printf(out,
                        "@ 0x%016" PRIx64 " vs 0x%016" PRIx64
                        " (%u/%d since last)\n",
                        us->pc, them->pc, g_slist_length(divergence_log),
                        divrec.distance);
        qemu_plugin_outs(out->str);
    }

    if (diverged) {
        int i;
        GSList *entry;

        g_string_printf(out,
                        "Δ insn_count @ 0x%016" PRIx64
                        " (%lu) vs 0x%016" PRIx64 " (%lu)\n",
                        us->pc, us->insn_count, them->pc, them->insn_count);

        /* Dump the five most recent log entries for context. */
        for (entry = log, i = 0;
             g_slist_next(entry) && i < 5;
             entry = g_slist_next(entry), i++) {
            ExecInfo *prev = (ExecInfo *) entry->data;
            g_string_append_printf(out,
                                   " previously @ 0x%016" PRIx64 "/%" PRIu64
                                   " (%lu insns)\n",
                                   prev->block->pc, prev->block->insns,
                                   prev->insn_count);
        }
        qemu_plugin_outs(out->str);
        qemu_plugin_outs("too much divergence... giving up.");
        qemu_plugin_uninstall(our_id, plugin_cleanup);
    }
}
169
vcpu_tb_exec(unsigned int cpu_index,void * udata)170 static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
171 {
172 BlockInfo *bi = (BlockInfo *) udata;
173 ExecState us, them;
174 ssize_t bytes;
175 ExecInfo *exec;
176
177 us.pc = bi->pc;
178 us.insn_count = insn_count;
179
180 /*
181 * Write our current position to the other end. If we fail the
182 * other end has probably died and we should shut down gracefully.
183 */
184 bytes = write(socket_fd, &us, sizeof(ExecState));
185 if (bytes < sizeof(ExecState)) {
186 qemu_plugin_outs(bytes < 0 ?
187 "problem writing to socket" :
188 "wrote less than expected to socket");
189 qemu_plugin_uninstall(our_id, plugin_cleanup);
190 return;
191 }
192
193 /*
194 * Now read where our peer has reached. Again a failure probably
195 * indicates the other end died and we should close down cleanly.
196 */
197 bytes = read(socket_fd, &them, sizeof(ExecState));
198 if (bytes < sizeof(ExecState)) {
199 qemu_plugin_outs(bytes < 0 ?
200 "problem reading from socket" :
201 "read less than expected");
202 qemu_plugin_uninstall(our_id, plugin_cleanup);
203 return;
204 }
205
206 /*
207 * Compare and report if we have diverged.
208 */
209 if (us.pc != them.pc) {
210 report_divergance(&us, &them);
211 }
212
213 /*
214 * Assume this block will execute fully and record it
215 * in the execution log.
216 */
217 insn_count += bi->insns;
218 bb_count++;
219 exec = g_new0(ExecInfo, 1);
220 exec->block = bi;
221 exec->insn_count = insn_count;
222 exec->block_count = bb_count;
223 log = g_slist_prepend(log, exec);
224 }
225
/*
 * Translation-time callback: record the block's pc and instruction
 * count and hook its execution.
 */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    BlockInfo *record = g_new0(BlockInfo, 1);

    record->pc = qemu_plugin_tb_vaddr(tb);
    record->insns = qemu_plugin_tb_n_insns(tb);

    /* keep a reference on the global list so plugin_cleanup can free it */
    blocks = g_slist_prepend(blocks, record);

    qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         (void *) record);
}
237
238
239 /*
240 * Instead of encoding master/slave status into what is essentially
241 * two peers we shall just take the simple approach of checking for
242 * the existence of the pipe and assuming if it's not there we are the
243 * first process.
244 */
setup_socket(const char * path)245 static bool setup_socket(const char *path)
246 {
247 struct sockaddr_un sockaddr;
248 const gsize pathlen = sizeof(sockaddr.sun_path) - 1;
249 int fd;
250
251 fd = socket(AF_UNIX, SOCK_STREAM, 0);
252 if (fd < 0) {
253 perror("create socket");
254 return false;
255 }
256
257 sockaddr.sun_family = AF_UNIX;
258 if (g_strlcpy(sockaddr.sun_path, path, pathlen) >= pathlen) {
259 perror("bad path");
260 close(fd);
261 return false;
262 }
263
264 if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
265 perror("bind socket");
266 close(fd);
267 return false;
268 }
269
270 /* remember to clean-up */
271 path_to_unlink = g_strdup(path);
272
273 if (listen(fd, 1) < 0) {
274 perror("listen socket");
275 close(fd);
276 return false;
277 }
278
279 socket_fd = accept(fd, NULL, NULL);
280 if (socket_fd < 0 && errno != EINTR) {
281 perror("accept socket");
282 close(fd);
283 return false;
284 }
285
286 qemu_plugin_outs("setup_socket::ready\n");
287
288 close(fd);
289 return true;
290 }
291
connect_socket(const char * path)292 static bool connect_socket(const char *path)
293 {
294 int fd;
295 struct sockaddr_un sockaddr;
296 const gsize pathlen = sizeof(sockaddr.sun_path) - 1;
297
298 fd = socket(AF_UNIX, SOCK_STREAM, 0);
299 if (fd < 0) {
300 perror("create socket");
301 return false;
302 }
303
304 sockaddr.sun_family = AF_UNIX;
305 if (g_strlcpy(sockaddr.sun_path, path, pathlen) >= pathlen) {
306 perror("bad path");
307 close(fd);
308 return false;
309 }
310
311 if (connect(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
312 perror("failed to connect");
313 close(fd);
314 return false;
315 }
316
317 qemu_plugin_outs("connect_socket::ready\n");
318
319 socket_fd = fd;
320 return true;
321 }
322
setup_unix_socket(const char * path)323 static bool setup_unix_socket(const char *path)
324 {
325 if (g_file_test(path, G_FILE_TEST_EXISTS)) {
326 return connect_socket(path);
327 } else {
328 return setup_socket(path);
329 }
330 }
331
332
qemu_plugin_install(qemu_plugin_id_t id,const qemu_info_t * info,int argc,char ** argv)333 QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
334 const qemu_info_t *info,
335 int argc, char **argv)
336 {
337 int i;
338 g_autofree char *sock_path = NULL;
339
340 for (i = 0; i < argc; i++) {
341 char *p = argv[i];
342 g_auto(GStrv) tokens = g_strsplit(p, "=", 2);
343
344 if (g_strcmp0(tokens[0], "verbose") == 0) {
345 if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &verbose)) {
346 fprintf(stderr, "boolean argument parsing failed: %s\n", p);
347 return -1;
348 }
349 } else if (g_strcmp0(tokens[0], "sockpath") == 0) {
350 sock_path = tokens[1];
351 } else {
352 fprintf(stderr, "option parsing failed: %s\n", p);
353 return -1;
354 }
355 }
356
357 if (sock_path == NULL) {
358 fprintf(stderr, "Need a socket path to talk to other instance.\n");
359 return -1;
360 }
361
362 if (!setup_unix_socket(sock_path)) {
363 fprintf(stderr, "Failed to setup socket for communications.\n");
364 return -1;
365 }
366
367 our_id = id;
368
369 qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
370 qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
371 return 0;
372 }
373