xref: /openbmc/qemu/accel/tcg/plugin-gen.c (revision 6c1e3906)
1 /*
2  * plugin-gen.c - TCG-related bits of plugin infrastructure
3  *
4  * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
5  * License: GNU GPL, version 2 or later.
6  *   See the COPYING file in the top-level directory.
7  *
8  * We support instrumentation at an instruction granularity. That is,
9  * if a plugin wants to instrument the memory accesses performed by a
10  * particular instruction, it can just do that instead of instrumenting
11  * all memory accesses. Thus, in order to do this we first have to
12  * translate a TB, so that plugins can decide what/where to instrument.
13  *
14  * Injecting the desired instrumentation could be done with a second
15  * translation pass that combined the instrumentation requests, but that
16  * would be ugly and inefficient since we would decode the guest code twice.
17  * Instead, during TB translation we add "empty" instrumentation calls for all
18  * possible instrumentation events, and then once we collect the instrumentation
19  * requests from plugins, we either "fill in" those empty events or remove them
20  * if they have no requests.
21  *
22  * When "filling in" an event we first copy the empty callback's TCG ops. This
23  * might seem unnecessary, but it is done to support an arbitrary number
24  * of callbacks per event. Take for example a regular instruction callback.
25  * We first generate a callback to an empty helper function. Then, if two
26  * plugins register one callback each for this instruction, we make two copies
27  * of the TCG ops generated for the empty callback, substituting the function
28  * pointer that points to the empty helper function with the plugins' desired
29  * callback functions. After that we remove the empty callback's ops.
30  *
31  * Note that the location in TCGOp.args[] of the pointer to a helper function
32  * varies across different guest and host architectures. Instead of duplicating
33  * the logic that figures this out, we rely on the fact that the empty
34  * callbacks point to empty functions that are unique pointers in the program.
35  * Thus, to find the right location we just have to look for a match in
36  * TCGOp.args[]. This is the main reason why we first copy an empty callback's
37  * TCG ops and then fill them in; regardless of whether we have one or many
38  * callbacks for that event, the logic to add all of them is the same.
39  *
40  * When generating more than one callback per event, we make a small
41  * optimization to avoid generating redundant operations. For instance, for the
42  * second and all subsequent callbacks of an event, we do not need to reload the
43  * CPU's index into a TCG temp, since the first callback did it already.
44  */
45 #include "qemu/osdep.h"
46 #include "tcg/tcg.h"
47 #include "tcg/tcg-temp-internal.h"
48 #include "tcg/tcg-op.h"
49 #include "exec/exec-all.h"
50 #include "exec/plugin-gen.h"
51 #include "exec/translator.h"
52 
53 #ifdef CONFIG_SOFTMMU
54 # define CONFIG_SOFTMMU_GATE 1
55 #else
56 # define CONFIG_SOFTMMU_GATE 0
57 #endif
58 
59 /*
60  * plugin_cb_start TCG op args[]:
61  * 0: enum plugin_gen_from
62  * 1: enum plugin_gen_cb
63  * 2: set to 1 for mem callback that is a write, 0 otherwise.
64  */
65 
66 enum plugin_gen_from {
67     PLUGIN_GEN_FROM_TB,
68     PLUGIN_GEN_FROM_INSN,
69     PLUGIN_GEN_FROM_MEM,
70     PLUGIN_GEN_AFTER_INSN,
71     PLUGIN_GEN_N_FROMS,
72 };
73 
74 enum plugin_gen_cb {
75     PLUGIN_GEN_CB_UDATA,
76     PLUGIN_GEN_CB_INLINE,
77     PLUGIN_GEN_CB_MEM,
78     PLUGIN_GEN_ENABLE_MEM_HELPER,
79     PLUGIN_GEN_DISABLE_MEM_HELPER,
80     PLUGIN_GEN_N_CBS,
81 };
82 
83 /*
84  * These helpers are stubs that get dynamically switched out for calls
85  * direct to the plugin if they are subscribed to.
86  */
87 void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
88 { }
89 
90 void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
91                                 qemu_plugin_meminfo_t info, uint64_t vaddr,
92                                 void *userdata)
93 { }
94 
95 static void gen_empty_udata_cb(void)
96 {
97     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
98     TCGv_ptr udata = tcg_temp_ebb_new_ptr();
99 
100     tcg_gen_movi_ptr(udata, 0);
101     tcg_gen_ld_i32(cpu_index, cpu_env,
102                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
103     gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
104 
105     tcg_temp_free_ptr(udata);
106     tcg_temp_free_i32(cpu_index);
107 }
108 
109 /*
110  * For now we only support addi_i64.
111  * When we support more ops, we can generate one empty inline cb for each.
112  */
113 static void gen_empty_inline_cb(void)
114 {
115     TCGv_i64 val = tcg_temp_ebb_new_i64();
116     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
117 
118     tcg_gen_movi_ptr(ptr, 0);
119     tcg_gen_ld_i64(val, ptr, 0);
120     /* pass an immediate != 0 so that it doesn't get optimized away */
121     tcg_gen_addi_i64(val, val, 0xdeadface);
122     tcg_gen_st_i64(val, ptr, 0);
123     tcg_temp_free_ptr(ptr);
124     tcg_temp_free_i64(val);
125 }
126 
127 static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
128 {
129     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
130     TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
131     TCGv_ptr udata = tcg_temp_ebb_new_ptr();
132 
133     tcg_gen_movi_i32(meminfo, info);
134     tcg_gen_movi_ptr(udata, 0);
135     tcg_gen_ld_i32(cpu_index, cpu_env,
136                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
137 
138     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
139 
140     tcg_temp_free_ptr(udata);
141     tcg_temp_free_i32(meminfo);
142     tcg_temp_free_i32(cpu_index);
143 }
144 
145 /*
146  * Share the same function for enable/disable. When enabling, the NULL
147  * pointer will be overwritten later.
148  */
149 static void gen_empty_mem_helper(void)
150 {
151     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
152 
153     tcg_gen_movi_ptr(ptr, 0);
154     tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
155                                  offsetof(ArchCPU, env));
156     tcg_temp_free_ptr(ptr);
157 }
158 
159 static void gen_plugin_cb_start(enum plugin_gen_from from,
160                                 enum plugin_gen_cb type, unsigned wr)
161 {
162     tcg_gen_plugin_cb_start(from, type, wr);
163 }
164 
165 static void gen_wrapped(enum plugin_gen_from from,
166                         enum plugin_gen_cb type, void (*func)(void))
167 {
168     gen_plugin_cb_start(from, type, 0);
169     func();
170     tcg_gen_plugin_cb_end();
171 }
172 
173 static void plugin_gen_empty_callback(enum plugin_gen_from from)
174 {
175     switch (from) {
176     case PLUGIN_GEN_AFTER_INSN:
177         gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
178                     gen_empty_mem_helper);
179         break;
180     case PLUGIN_GEN_FROM_INSN:
181         /*
182          * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
183          * the first callback of an instruction
184          */
185         gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
186                     gen_empty_mem_helper);
187         /* fall through */
188     case PLUGIN_GEN_FROM_TB:
189         gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
190         gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
191         break;
192     default:
193         g_assert_not_reached();
194     }
195 }
196 
197 void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
198 {
199     enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
200 
201     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
202     gen_empty_mem_cb(addr, info);
203     tcg_gen_plugin_cb_end();
204 
205     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
206     gen_empty_inline_cb();
207     tcg_gen_plugin_cb_end();
208 }
209 
210 static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
211 {
212     while (op) {
213         if (op->opc == opc) {
214             return op;
215         }
216         op = QTAILQ_NEXT(op, link);
217     }
218     return NULL;
219 }
220 
221 static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
222 {
223     TCGOp *ret = QTAILQ_NEXT(end, link);
224 
225     QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
226     return ret;
227 }
228 
229 /* remove all ops until (and including) plugin_cb_end */
230 static TCGOp *rm_ops(TCGOp *op)
231 {
232     TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);
233 
234     tcg_debug_assert(end_op);
235     return rm_ops_range(op, end_op);
236 }
237 
238 static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
239 {
240     TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
241     unsigned nargs = old_op->nargs;
242 
243     *begin_op = old_op;
244     op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
245     memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);
246 
247     return op;
248 }
249 
250 static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
251 {
252     op = copy_op_nocheck(begin_op, op);
253     tcg_debug_assert((*begin_op)->opc == opc);
254     return op;
255 }
256 
257 static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
258 {
259     if (TCG_TARGET_REG_BITS == 32) {
260         /* mov_i32 */
261         op = copy_op(begin_op, op, INDEX_op_mov_i32);
262         /* mov_i32 w/ $0 */
263         op = copy_op(begin_op, op, INDEX_op_mov_i32);
264     } else {
265         /* extu_i32_i64 */
266         op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
267     }
268     return op;
269 }
270 
271 static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
272 {
273     if (TCG_TARGET_REG_BITS == 32) {
274         /* 2x mov_i32 */
275         op = copy_op(begin_op, op, INDEX_op_mov_i32);
276         op = copy_op(begin_op, op, INDEX_op_mov_i32);
277     } else {
278         /* mov_i64 */
279         op = copy_op(begin_op, op, INDEX_op_mov_i64);
280     }
281     return op;
282 }
283 
284 static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
285 {
286     if (UINTPTR_MAX == UINT32_MAX) {
287         /* mov_i32 */
288         op = copy_op(begin_op, op, INDEX_op_mov_i32);
289         op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
290     } else {
291         /* mov_i64 */
292         op = copy_op(begin_op, op, INDEX_op_mov_i64);
293         op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
294     }
295     return op;
296 }
297 
298 static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
299 {
300     if (TARGET_LONG_BITS == 32) {
301         /* extu_i32_i64 */
302         op = copy_extu_i32_i64(begin_op, op);
303     } else {
304         /* mov_i64 */
305         op = copy_mov_i64(begin_op, op);
306     }
307     return op;
308 }
309 
310 static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
311 {
312     if (TCG_TARGET_REG_BITS == 32) {
313         /* 2x ld_i32 */
314         op = copy_op(begin_op, op, INDEX_op_ld_i32);
315         op = copy_op(begin_op, op, INDEX_op_ld_i32);
316     } else {
317         /* ld_i64 */
318         op = copy_op(begin_op, op, INDEX_op_ld_i64);
319     }
320     return op;
321 }
322 
323 static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
324 {
325     if (TCG_TARGET_REG_BITS == 32) {
326         /* 2x st_i32 */
327         op = copy_op(begin_op, op, INDEX_op_st_i32);
328         op = copy_op(begin_op, op, INDEX_op_st_i32);
329     } else {
330         /* st_i64 */
331         op = copy_op(begin_op, op, INDEX_op_st_i64);
332     }
333     return op;
334 }
335 
336 static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
337 {
338     if (TCG_TARGET_REG_BITS == 32) {
339         /* all 32-bit backends must implement add2_i32 */
340         g_assert(TCG_TARGET_HAS_add2_i32);
341         op = copy_op(begin_op, op, INDEX_op_add2_i32);
342         op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
343         op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
344     } else {
345         op = copy_op(begin_op, op, INDEX_op_add_i64);
346         op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
347     }
348     return op;
349 }
350 
351 static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
352 {
353     if (UINTPTR_MAX == UINT32_MAX) {
354         /* st_i32 */
355         op = copy_op(begin_op, op, INDEX_op_st_i32);
356     } else {
357         /* st_i64 */
358         op = copy_st_i64(begin_op, op);
359     }
360     return op;
361 }
362 
363 static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
364                         void *func, int *cb_idx)
365 {
366     TCGOp *old_op;
367     int func_idx;
368 
369     /* copy all ops until the call */
370     do {
371         op = copy_op_nocheck(begin_op, op);
372     } while (op->opc != INDEX_op_call);
373 
374     /* fill in the op call */
375     old_op = *begin_op;
376     TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
377     TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
378     tcg_debug_assert(op->life == 0);
379 
380     func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
381     *cb_idx = func_idx;
382     op->args[func_idx] = (uintptr_t)func;
383 
384     return op;
385 }
386 
387 /*
388  * When we append/replace ops here we are sensitive to changing patterns of
389  * TCGOps generated by the tcg_gen_FOO calls when we generated the
390  * empty callbacks. This will assert very quickly in a debug build as
391  * we assert the ops we are replacing are the correct ones.
392  */
393 static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
394                               TCGOp *begin_op, TCGOp *op, int *cb_idx)
395 {
396     /* const_ptr */
397     op = copy_const_ptr(&begin_op, op, cb->userp);
398 
399     /* copy the ld_i32, but note that we only have to copy it once */
400     if (*cb_idx == -1) {
401         op = copy_op(&begin_op, op, INDEX_op_ld_i32);
402     } else {
403         begin_op = QTAILQ_NEXT(begin_op, link);
404         tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
405     }
406 
407     /* call */
408     op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
409                    cb->f.vcpu_udata, cb_idx);
410 
411     return op;
412 }
413 
414 static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
415                                TCGOp *begin_op, TCGOp *op,
416                                int *unused)
417 {
418     /* const_ptr */
419     op = copy_const_ptr(&begin_op, op, cb->userp);
420 
421     /* ld_i64 */
422     op = copy_ld_i64(&begin_op, op);
423 
424     /* add_i64 */
425     op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
426 
427     /* st_i64 */
428     op = copy_st_i64(&begin_op, op);
429 
430     return op;
431 }
432 
433 static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
434                             TCGOp *begin_op, TCGOp *op, int *cb_idx)
435 {
436     enum plugin_gen_cb type = begin_op->args[1];
437 
438     tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);
439 
440     /* const_i32 == mov_i32 ("info", so it remains as is) */
441     op = copy_op(&begin_op, op, INDEX_op_mov_i32);
442 
443     /* const_ptr */
444     op = copy_const_ptr(&begin_op, op, cb->userp);
445 
446     /* copy the ld_i32, but note that we only have to copy it once */
447     if (*cb_idx == -1) {
448         op = copy_op(&begin_op, op, INDEX_op_ld_i32);
449     } else {
450         begin_op = QTAILQ_NEXT(begin_op, link);
451         tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
452     }
453 
454     /* extu_tl_i64 */
455     op = copy_extu_tl_i64(&begin_op, op);
456 
457     if (type == PLUGIN_GEN_CB_MEM) {
458         /* call */
459         op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
460                        cb->f.vcpu_udata, cb_idx);
461     }
462 
463     return op;
464 }
465 
466 typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
467                             TCGOp *begin_op, TCGOp *op, int *intp);
468 typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);
469 
470 static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
471 {
472     return true;
473 }
474 
475 static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
476 {
477     int w;
478 
479     w = op->args[2];
480     return !!(cb->rw & (w + 1));
481 }
482 
483 static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
484                            inject_fn inject, op_ok_fn ok)
485 {
486     TCGOp *end_op;
487     TCGOp *op;
488     int cb_idx = -1;
489     int i;
490 
491     if (!cbs || cbs->len == 0) {
492         rm_ops(begin_op);
493         return;
494     }
495 
496     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
497     tcg_debug_assert(end_op);
498 
499     op = end_op;
500     for (i = 0; i < cbs->len; i++) {
501         struct qemu_plugin_dyn_cb *cb =
502             &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
503 
504         if (!ok(begin_op, cb)) {
505             continue;
506         }
507         op = inject(cb, begin_op, op, &cb_idx);
508     }
509     rm_ops_range(begin_op, end_op);
510 }
511 
512 static void
513 inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
514 {
515     inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
516 }
517 
518 static void
519 inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
520 {
521     inject_cb_type(cbs, begin_op, append_inline_cb, ok);
522 }
523 
524 static void
525 inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
526 {
527     inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
528 }
529 
530 /* we could change the ops in place, but we can reuse more code by copying */
531 static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
532 {
533     TCGOp *orig_op = begin_op;
534     TCGOp *end_op;
535     TCGOp *op;
536 
537     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
538     tcg_debug_assert(end_op);
539 
540     /* const ptr */
541     op = copy_const_ptr(&begin_op, end_op, arr);
542 
543     /* st_ptr */
544     op = copy_st_ptr(&begin_op, op);
545 
546     rm_ops_range(orig_op, end_op);
547 }
548 
549 /*
550  * Tracking memory accesses performed from helpers requires extra work.
551  * If an instruction is emulated with helpers, we do two things:
552  * (1) copy the CB descriptors, and keep track of it so that they can be
553  * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
554  * that we can read them at run-time (i.e. when the helper executes).
555  * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
556  *
557  * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
558  * is possible that the code we generate after the instruction is
559  * dead, we also add checks before generating tb_exit etc.
560  */
561 static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
562                                      struct qemu_plugin_insn *plugin_insn,
563                                      TCGOp *begin_op)
564 {
565     GArray *cbs[2];
566     GArray *arr;
567     size_t n_cbs, i;
568 
569     cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
570     cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
571 
572     n_cbs = 0;
573     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
574         n_cbs += cbs[i]->len;
575     }
576 
577     plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
578     if (likely(!plugin_insn->mem_helper)) {
579         rm_ops(begin_op);
580         return;
581     }
582     ptb->mem_helper = true;
583 
584     arr = g_array_sized_new(false, false,
585                             sizeof(struct qemu_plugin_dyn_cb), n_cbs);
586 
587     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
588         g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
589     }
590 
591     qemu_plugin_add_dyn_cb_arr(arr);
592     inject_mem_helper(begin_op, arr);
593 }
594 
595 static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
596                                       TCGOp *begin_op)
597 {
598     if (likely(!plugin_insn->mem_helper)) {
599         rm_ops(begin_op);
600         return;
601     }
602     inject_mem_helper(begin_op, NULL);
603 }
604 
605 /* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
606 void plugin_gen_disable_mem_helpers(void)
607 {
608     /*
609      * We could emit the clearing unconditionally and be done. However, this can
610      * be wasteful if for instance plugins don't track memory accesses, or if
611      * most TBs don't use helpers. Instead, emit the clearing iff the TB calls
612      * helpers that might access guest memory.
613      *
614      * Note: we do not reset plugin_tb->mem_helper here; a TB might have several
615      * exit points, and we want to emit the clearing from all of them.
616      */
617     if (!tcg_ctx->plugin_tb->mem_helper) {
618         return;
619     }
620     tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
621                    offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
622 }
623 
624 static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
625                                 TCGOp *begin_op)
626 {
627     inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
628 }
629 
630 static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
631                                  TCGOp *begin_op)
632 {
633     inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
634 }
635 
636 static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
637                                   TCGOp *begin_op, int insn_idx)
638 {
639     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
640 
641     inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
642 }
643 
644 static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
645                                    TCGOp *begin_op, int insn_idx)
646 {
647     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
648     inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
649                      begin_op, op_ok);
650 }
651 
652 static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
653                                    TCGOp *begin_op, int insn_idx)
654 {
655     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
656     inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
657 }
658 
659 static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
660                                   TCGOp *begin_op, int insn_idx)
661 {
662     const GArray *cbs;
663     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
664 
665     cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
666     inject_inline_cb(cbs, begin_op, op_rw);
667 }
668 
669 static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
670                                          TCGOp *begin_op, int insn_idx)
671 {
672     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
673     inject_mem_enable_helper(ptb, insn, begin_op);
674 }
675 
676 static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
677                                           TCGOp *begin_op, int insn_idx)
678 {
679     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
680     inject_mem_disable_helper(insn, begin_op);
681 }
682 
683 /* #define DEBUG_PLUGIN_GEN_OPS */
684 static void pr_ops(void)
685 {
686 #ifdef DEBUG_PLUGIN_GEN_OPS
687     TCGOp *op;
688     int i = 0;
689 
690     QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
691         const char *name = "";
692         const char *type = "";
693 
694         if (op->opc == INDEX_op_plugin_cb_start) {
695             switch (op->args[0]) {
696             case PLUGIN_GEN_FROM_TB:
697                 name = "tb";
698                 break;
699             case PLUGIN_GEN_FROM_INSN:
700                 name = "insn";
701                 break;
702             case PLUGIN_GEN_FROM_MEM:
703                 name = "mem";
704                 break;
705             case PLUGIN_GEN_AFTER_INSN:
706                 name = "after insn";
707                 break;
708             default:
709                 break;
710             }
711             switch (op->args[1]) {
712             case PLUGIN_GEN_CB_UDATA:
713                 type = "udata";
714                 break;
715             case PLUGIN_GEN_CB_INLINE:
716                 type = "inline";
717                 break;
718             case PLUGIN_GEN_CB_MEM:
719                 type = "mem";
720                 break;
721             case PLUGIN_GEN_ENABLE_MEM_HELPER:
722                 type = "enable mem helper";
723                 break;
724             case PLUGIN_GEN_DISABLE_MEM_HELPER:
725                 type = "disable mem helper";
726                 break;
727             default:
728                 break;
729             }
730         }
731         printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
732         i++;
733     }
734 #endif
735 }
736 
737 static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
738 {
739     TCGOp *op;
740     int insn_idx = -1;
741 
742     pr_ops();
743 
744     QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
745         switch (op->opc) {
746         case INDEX_op_insn_start:
747             insn_idx++;
748             break;
749         case INDEX_op_plugin_cb_start:
750         {
751             enum plugin_gen_from from = op->args[0];
752             enum plugin_gen_cb type = op->args[1];
753 
754             switch (from) {
755             case PLUGIN_GEN_FROM_TB:
756             {
757                 g_assert(insn_idx == -1);
758 
759                 switch (type) {
760                 case PLUGIN_GEN_CB_UDATA:
761                     plugin_gen_tb_udata(plugin_tb, op);
762                     break;
763                 case PLUGIN_GEN_CB_INLINE:
764                     plugin_gen_tb_inline(plugin_tb, op);
765                     break;
766                 default:
767                     g_assert_not_reached();
768                 }
769                 break;
770             }
771             case PLUGIN_GEN_FROM_INSN:
772             {
773                 g_assert(insn_idx >= 0);
774 
775                 switch (type) {
776                 case PLUGIN_GEN_CB_UDATA:
777                     plugin_gen_insn_udata(plugin_tb, op, insn_idx);
778                     break;
779                 case PLUGIN_GEN_CB_INLINE:
780                     plugin_gen_insn_inline(plugin_tb, op, insn_idx);
781                     break;
782                 case PLUGIN_GEN_ENABLE_MEM_HELPER:
783                     plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
784                     break;
785                 default:
786                     g_assert_not_reached();
787                 }
788                 break;
789             }
790             case PLUGIN_GEN_FROM_MEM:
791             {
792                 g_assert(insn_idx >= 0);
793 
794                 switch (type) {
795                 case PLUGIN_GEN_CB_MEM:
796                     plugin_gen_mem_regular(plugin_tb, op, insn_idx);
797                     break;
798                 case PLUGIN_GEN_CB_INLINE:
799                     plugin_gen_mem_inline(plugin_tb, op, insn_idx);
800                     break;
801                 default:
802                     g_assert_not_reached();
803                 }
804 
805                 break;
806             }
807             case PLUGIN_GEN_AFTER_INSN:
808             {
809                 g_assert(insn_idx >= 0);
810 
811                 switch (type) {
812                 case PLUGIN_GEN_DISABLE_MEM_HELPER:
813                     plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
814                     break;
815                 default:
816                     g_assert_not_reached();
817                 }
818                 break;
819             }
820             default:
821                 g_assert_not_reached();
822             }
823             break;
824         }
825         default:
826             /* plugins don't care about any other ops */
827             break;
828         }
829     }
830     pr_ops();
831 }
832 
833 bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
834                          bool mem_only)
835 {
836     bool ret = false;
837 
838     if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
839         struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
840         int i;
841 
842         /* reset callbacks */
843         for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
844             if (ptb->cbs[i]) {
845                 g_array_set_size(ptb->cbs[i], 0);
846             }
847         }
848         ptb->n = 0;
849 
850         ret = true;
851 
852         ptb->vaddr = db->pc_first;
853         ptb->vaddr2 = -1;
854         ptb->haddr1 = db->host_addr[0];
855         ptb->haddr2 = NULL;
856         ptb->mem_only = mem_only;
857         ptb->mem_helper = false;
858 
859         plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
860     }
861 
862     tcg_ctx->plugin_insn = NULL;
863 
864     return ret;
865 }
866 
867 void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
868 {
869     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
870     struct qemu_plugin_insn *pinsn;
871 
872     pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
873     tcg_ctx->plugin_insn = pinsn;
874     plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
875 
876     /*
877      * Detect page crossing to get the new host address.
878      * Note that we skip this when haddr1 == NULL, e.g. when we're
879      * fetching instructions from a region not backed by RAM.
880      */
881     if (ptb->haddr1 == NULL) {
882         pinsn->haddr = NULL;
883     } else if (is_same_page(db, db->pc_next)) {
884         pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
885     } else {
886         if (ptb->vaddr2 == -1) {
887             ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
888             get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
889         }
890         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
891     }
892 }
893 
894 void plugin_gen_insn_end(void)
895 {
896     plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
897 }
898 
899 /*
900  * There are cases where we never get to finalise a translation - for
901  * example a page fault during translation. As a result we shouldn't
902  * do any clean-up here and make sure things are reset in
903  * plugin_gen_tb_start.
904  */
905 void plugin_gen_tb_end(CPUState *cpu)
906 {
907     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
908 
909     /* collect instrumentation requests */
910     qemu_plugin_tb_trans_cb(cpu, ptb);
911 
912     /* inject the instrumentation at the appropriate places */
913     plugin_gen_inject(ptb);
914 }
915