/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"

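/*
 * Injection points for the "plugin_cb" marker opcodes.  FROM_TB marks the
 * start of the TB, FROM_INSN the start of each instruction, AFTER_INSN the
 * end of each instruction, and AFTER_TB each potential exit from the TB.
 */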
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

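/*
 * Called when expanding the FROM_INSN marker: if this instruction is
 * emulated with helpers, publish a copy of its memory-callback
 * descriptors via CPUState.neg.plugin_mem_cbs so that the helpers can
 * dispatch them at run-time.
 */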
static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of the copy so that it
     * can be freed later on, and (2) point CPUState.neg.plugin_mem_cbs to
     * the descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    g_array_append_vals(arr, insn->mem_cbs->data, len);
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}

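/* Clear CPUState.neg.plugin_mem_cbs, undoing gen_enable_mem_helper. */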
static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}

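/* Return a TCG value holding the current vcpu index; a constant if possible. */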
static TCGv_i32 gen_cpu_index(void)
{
    /*
     * Optimize when we run with a single vcpu. All values using cpu_index,
     * including scoreboard index, will be optimized out.
     * User-mode calls tb_flush when setting this flag. In system-mode, all
     * vcpus are created before generating code.
     */
    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
        return tcg_constant_i32(current_cpu->cpu_index);
    }
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   offsetof(CPUState, cpu_index) - sizeof(CPUState));
    return cpu_index;
}

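/*
 * Emit a call to a plugin's userdata callback, schematically:
 *
 *     cpu->neg.plugin_cb_flags = <flags derived from cb->info>;
 *     cb->f.vcpu_udata(cpu_index, cb->userp);
 *     cpu->neg.plugin_cb_flags = QEMU_PLUGIN_CB_NO_REGS;
 *
 * The stores bracketing the call record which register accesses the
 * callback is allowed to perform while it runs.
 */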
static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    enum qemu_plugin_cb_flags cb_flags =
        tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
    TCGv_i32 flags = tcg_constant_i32(cb_flags);
    TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
    tcg_gen_st_i32(flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_gen_st_i32(clear_flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_temp_free_i32(cpu_index);
    tcg_temp_free_i32(flags);
    tcg_temp_free_i32(clear_flags);
}

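/*
 * Compute the address of this vcpu's u64 slot in a scoreboard, i.e. roughly:
 *
 *     entry.score->data->data + entry.offset + cpu_index * element_size
 *
 * With a single vcpu the index is constant and the arithmetic folds away.
 */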
static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    GArray *arr = entry.score->data;
    char *base_ptr = arr->data + entry.offset;
    size_t entry_size = g_array_get_element_size(arr);

    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);
    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);

    return ptr;
}

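/* Map a plugin condition onto the equivalent unsigned TCG comparison. */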
static TCGCond plugin_cond_to_tcgcond(enum qemu_plugin_cond cond)
{
    switch (cond) {
    case QEMU_PLUGIN_COND_EQ:
        return TCG_COND_EQ;
    case QEMU_PLUGIN_COND_NE:
        return TCG_COND_NE;
    case QEMU_PLUGIN_COND_LT:
        return TCG_COND_LTU;
    case QEMU_PLUGIN_COND_LE:
        return TCG_COND_LEU;
    case QEMU_PLUGIN_COND_GT:
        return TCG_COND_GTU;
    case QEMU_PLUGIN_COND_GE:
        return TCG_COND_GEU;
    default:
        /* ALWAYS and NEVER conditions should never reach this point */
        g_assert_not_reached();
    }
}

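/*
 * Emit a conditional callback, schematically:
 *
 *     if (*entry <cond> imm) {
 *         cb->f.vcpu_udata(cpu_index, cb->userp);
 *     }
 *
 * where *entry is this vcpu's u64 scoreboard slot.
 */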
static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGLabel *after_cb = gen_new_label();

    /* Condition should be negated, as calling the cb is the "else" path */
    TCGCond cond = tcg_invert_cond(plugin_cond_to_tcgcond(cb->cond));

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
    TCGv_i32 cpu_index = gen_cpu_index();
    enum qemu_plugin_cb_flags cb_flags =
        tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
    TCGv_i32 flags = tcg_constant_i32(cb_flags);
    TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
    tcg_gen_st_i32(flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_gen_st_i32(clear_flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_temp_free_i32(cpu_index);
    tcg_temp_free_i32(flags);
    tcg_temp_free_i32(clear_flags);
    gen_set_label(after_cb);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

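/* Emit *entry += imm inline, without calling out of the generated code. */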
static void gen_inline_add_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_addi_i64(val, val, cb->imm);
    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

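/* Emit *entry = imm inline. */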
static void gen_inline_store_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_constant_i64(cb->imm);

    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_ptr(ptr);
}

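/*
 * Emit a call to a plugin's memory callback, i.e. roughly
 * cb->f.vcpu_mem(cpu_index, meminfo, addr, cb->userp), with the same
 * plugin_cb_flags bracketing as gen_udata_cb.
 */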
static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    enum qemu_plugin_cb_flags cb_flags =
        tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
    TCGv_i32 flags = tcg_constant_i32(cb_flags);
    TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
    tcg_gen_st_i32(flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_gen_st_i32(clear_flags, tcg_env,
                   offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
    tcg_temp_free_i32(cpu_index);
    tcg_temp_free_i32(flags);
    tcg_temp_free_i32(clear_flags);
}

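/* Generate code for one callback, dispatching on its type. */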
static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(&cb->regular);
        break;
    case PLUGIN_CB_COND:
        gen_udata_cond_cb(&cb->cond);
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
        gen_inline_add_u64_cb(&cb->inline_insn);
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        gen_inline_store_u64_cb(&cb->inline_insn);
        break;
    default:
        g_assert_not_reached();
    }
}

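/*
 * As inject_cb, but for memory callbacks: only generate code if the
 * callback is interested in this access direction (read or write).
 */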
static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    switch (cb->type) {
    case PLUGIN_CB_MEM_REGULAR:
        if (rw & cb->regular.rw) {
            gen_mem_cb(&cb->regular, meminfo, addr);
        }
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
    case PLUGIN_CB_INLINE_STORE_U64:
        if (rw & cb->inline_insn.rw) {
            inject_cb(cb);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

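/*
 * Walk the TB's opcode stream and expand each plugin marker opcode into
 * the callbacks that plugins registered for that point, removing the
 * marker itself; markers with no requests simply disappear.
 */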
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new.
     */
    tcg_temp_ebb_reset_freed(tcg_ctx);

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

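/*
 * Called at the start of TB translation.  Returns true if at least one
 * plugin subscribed to TB-translation events; in that case, reset the
 * per-TB state (plugin_tb is reused between TBs) and emit the FROM_TB
 * marker.
 */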
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

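/*
 * Called before translating each instruction: reuse or allocate the
 * qemu_plugin_insn descriptor for this slot, reset its per-translation
 * state, and emit the FROM_INSN marker.
 */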
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

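/*
 * Called after translating each instruction: record the instruction's
 * length and emit the AFTER_INSN marker.
 */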
void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead we make sure everything is reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* reset plugin translation state (plugin_tb is reused between blocks) */
    tcg_ctx->plugin_db = NULL;
    tcg_ctx->plugin_insn = NULL;
}