/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at instruction granularity. That is, if
 * a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can do just that instead of instrumenting
 * all memory accesses. To support this, we first have to translate a
 * TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
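
/*
 * A rough sketch of how the translator drives this file. The real call
 * sites live in the translator loop and in TCG load/store generation;
 * the driver function below is hypothetical and for illustration only.
 */
#if 0
void example_translation_flow(CPUState *cpu, DisasContextBase *db)
{
    bool more_insns = true;

    plugin_gen_tb_start(cpu, db, false);   /* empty TB-level callbacks */
    while (more_insns) {
        plugin_gen_insn_start(cpu, db);    /* empty insn-level callbacks */
        /*
         * Target decode runs here; emitted guest loads/stores call
         * plugin_gen_empty_mem_callback() for each access.
         */
        plugin_gen_insn_end();             /* AFTER_INSN disable point */
        more_insns = false;                /* single-insn TB for the sketch */
    }
    plugin_gen_tb_end(cpu);                /* fill in or remove empty ops */
}
#endif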
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/helper-proto-common.h"

#define HELPER_H  "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for a mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for direct
 * calls into the plugin once a plugin subscribes to the event.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

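/*
 * Emit the template ops that append_udata_cb() will later match and
 * copy: a constant-0 userdata pointer, a load of cpu_index from env,
 * and a call to the stub helper above.
 */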
static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}

static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_i32(meminfo, info);
    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction.
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

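/*
 * Called while generating a guest load/store: emits two adjacent
 * templates for this access, one for a regular mem callback and one
 * for an inline op, each bracketed by cb_start/cb_end markers so that
 * plugin_gen_inject() can find and fill in or remove them.
 */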
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
    gen_empty_mem_cb(addr, info);
    tcg_gen_plugin_cb_end();

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
    gen_empty_inline_cb();
    tcg_gen_plugin_cb_end();
}

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

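/*
 * Advance *begin_op to the next op in the template sequence and append
 * a copy of it after @op. Callers walk the empty-callback template via
 * *begin_op while building the filled-in copy at @op.
 */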
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

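/*
 * In a TCG call op, args[] holds the output args, then the input args,
 * then the helper's address; hence the function pointer lives at index
 * CALLO + CALLI, which is where we substitute the plugin's callback.
 */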
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}

/*
 * When we append/replace ops here we are sensitive to the exact pattern
 * of TCGOps generated by the tcg_gen_FOO calls that produced the empty
 * callbacks. Any mismatch asserts very quickly in a debug build, since
 * we check that each op we are replacing is the expected one.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

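/*
 * args[2] of plugin_cb_start is 1 for a write and 0 for a read (see the
 * comment near the top of this file), so w + 1 maps reads/writes onto
 * the QEMU_PLUGIN_MEM_R/QEMU_PLUGIN_MEM_W bits tested against cb->rw.
 */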
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

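/*
 * Fill in one empty-callback template: append one filled-in copy of the
 * template ops per registered callback, then remove the template. If no
 * callbacks are registered (or none pass @ok), just remove the template.
 */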
static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of them so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
                                     struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    ptb->mem_helper = true;

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    /*
     * We could emit the clearing unconditionally and be done. However, this
     * can be wasteful if, for instance, plugins don't track memory accesses,
     * or if most TBs don't use helpers. Instead, emit the clearing iff the
     * TB calls helpers that might access guest memory.
     *
     * Note: we do not reset plugin_tb->mem_helper here; a TB might have
     * several exit points, and we want to emit the clearing from all of them.
     */
    if (!tcg_ctx->plugin_tb->mem_helper) {
        return;
    }
    tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
                   offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(ptb, insn, begin_op);
}

static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}

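/*
 * Second pass over the TB's ops: every plugin_cb_start marker emitted
 * during translation is dispatched to the matching injector, which
 * either fills in the template with the plugin's callbacks or removes
 * it. insn_start ops are counted to know which instruction we are in.
 */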
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation, for
 * example when a page fault occurs during translation. As a result we
 * should not do any clean-up here; instead we make sure everything is
 * reset in plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}
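
/*
 * For reference, a minimal sketch of a plugin that exercises the three
 * injection paths above (per-insn udata callback, per-insn inline op,
 * and per-access mem callback). This is an illustration against the
 * public qemu-plugin.h API, not part of this file's build:
 */
#if 0
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static uint64_t insn_count;

static void vcpu_insn_exec(unsigned int cpu_index, void *userdata)
{
    /* filled into the udata template by append_udata_cb() */
}

static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                     uint64_t vaddr, void *userdata)
{
    /* filled into the mem template by append_mem_cb() */
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t i, n = qemu_plugin_tb_n_insns(tb);

    for (i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

        qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, NULL);
        qemu_plugin_register_vcpu_insn_exec_inline(insn,
                                                   QEMU_PLUGIN_INLINE_ADD_U64,
                                                   &insn_count, 1);
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}
#endif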
879