/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

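/*
 * Injection points marked via tcg_gen_plugin_cb() during translation:
 * at TB entry, at each instruction, after each instruction, and before
 * the TB is exited (exit_tb, goto_tb or goto_ptr).
 */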
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they
     * can be freed later on, and (2) point CPUState.neg.plugin_mem_cbs
     * to the descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    g_array_append_vals(arr, insn->mem_cbs->data, len);
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

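/*
 * Clear CPUState.neg.plugin_mem_cbs, so that memory helpers executed
 * past this point no longer see the per-insn descriptor array installed
 * by gen_enable_mem_helper().
 */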
static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

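/*
 * Load the current vcpu index into a new EBB temp. tcg_env points at the
 * CPUArchState embedded in ArchCPU, hence the negative offset back to
 * CPUState.cpu_index. The caller is responsible for freeing the temp.
 */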
static TCGv_i32 gen_cpu_index(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    return cpu_index;
}

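/* Emit a call to a regular callback: cb->f.vcpu_udata(cpu_index, userp). */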
static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

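/*
 * Compute the address of this vcpu's slot in a scoreboard entry:
 * scoreboard base + entry offset + cpu_index * element_size, with the
 * cpu_index part evaluated at run time.
 */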
static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    GArray *arr = entry.score->data;
    char *base_ptr = arr->data + entry.offset;
    size_t entry_size = g_array_get_element_size(arr);

    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);
    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);

    return ptr;
}

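/*
 * Map a plugin comparison to the equivalent unsigned TCG condition.
 * ALWAYS and NEVER are expected to be filtered out before code
 * generation and must not reach this point.
 */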
static TCGCond plugin_cond_to_tcgcond(enum qemu_plugin_cond cond)
{
    switch (cond) {
    case QEMU_PLUGIN_COND_EQ:
        return TCG_COND_EQ;
    case QEMU_PLUGIN_COND_NE:
        return TCG_COND_NE;
    case QEMU_PLUGIN_COND_LT:
        return TCG_COND_LTU;
    case QEMU_PLUGIN_COND_LE:
        return TCG_COND_LEU;
    case QEMU_PLUGIN_COND_GT:
        return TCG_COND_GTU;
    case QEMU_PLUGIN_COND_GE:
        return TCG_COND_GEU;
    default:
        /* ALWAYS and NEVER conditions should never reach here */
        g_assert_not_reached();
    }
}

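/*
 * Emit a conditional callback: load the vcpu's scoreboard value and skip
 * the call unless it compares against cb->imm as requested. The branch
 * uses the inverted condition, since taking it means not calling.
 */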
static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGLabel *after_cb = gen_new_label();

    /* Condition should be negated, as calling the cb is the "else" path */
    TCGCond cond = tcg_invert_cond(plugin_cond_to_tcgcond(cb->cond));

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
    gen_set_label(after_cb);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

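/* Inline "add immediate" on the vcpu's scoreboard entry: *ptr += imm. */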
static void gen_inline_add_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_addi_i64(val, val, cb->imm);
    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

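/* Inline "store immediate" to the vcpu's scoreboard entry: *ptr = imm. */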
static void gen_inline_store_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_constant_i64(cb->imm);

    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_ptr(ptr);
}

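/*
 * Emit a call to a memory callback:
 * cb->f.vcpu_mem(cpu_index, meminfo, addr, userp).
 */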
static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

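/* Dispatch one dynamic callback descriptor to the matching generator. */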
static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(&cb->regular);
        break;
    case PLUGIN_CB_COND:
        gen_udata_cond_cb(&cb->cond);
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
        gen_inline_add_u64_cb(&cb->inline_insn);
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        gen_inline_store_u64_cb(&cb->inline_insn);
        break;
    default:
        g_assert_not_reached();
    }
}

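/*
 * As inject_cb(), but only emit the callback if it is interested in the
 * direction (read/write) of this particular memory access.
 */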
static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    switch (cb->type) {
    case PLUGIN_CB_MEM_REGULAR:
        if (rw & cb->regular.rw) {
            gen_mem_cb(&cb->regular, meminfo, addr);
        }
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
    case PLUGIN_CB_INLINE_STORE_U64:
        if (rw & cb->inline_insn.rw) {
            inject_cb(cb);
        }
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

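/*
 * Second pass over the opcode stream: expand each plugin_cb and
 * plugin_mem_cb marker into the instrumentation requested by plugins,
 * then remove the marker. insn_start ops are used to track which
 * instruction of the TB a marker belongs to.
 */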
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new ones.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

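/*
 * Called at the start of TB translation. Returns false (and emits no
 * marker) unless at least one plugin has subscribed to TB translation
 * events; otherwise resets the per-TB state and emits the FROM_TB marker.
 */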
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

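/*
 * Called by the translator at the start of each guest instruction:
 * reuse or allocate the qemu_plugin_insn descriptor for this slot,
 * reset its callback lists, record the instruction's vaddr, and emit
 * the FROM_INSN marker.
 */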
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

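/*
 * Called by the translator at the end of each guest instruction:
 * record the instruction's length and emit the AFTER_INSN marker.
 */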
void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}