/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. To support this, we first have to translate a TB
 * so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combines the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
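
/*
 * Illustrative sketch (not literal TCG output; the exact opcodes vary by
 * guest and host): each instrumentation point is bracketed by marker ops,
 * e.g. for an instruction-exec callback:
 *
 *     plugin_cb_start PLUGIN_GEN_FROM_INSN, PLUGIN_GEN_CB_UDATA, 0
 *     mov    <- NULL userdata placeholder constant
 *     ld_i32 <- cpu_index load
 *     call   <- empty helper
 *     plugin_cb_end
 *
 * Once qemu_plugin_tb_trans_cb() has collected the plugins' requests,
 * plugin_gen_inject() visits each bracketed region: it appends one copy of
 * the template per registered callback, substituting the empty helper
 * pointer and userdata, and then removes the original empty region.
 */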
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};
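
/*
 * Callback types emitted at each insertion point (see
 * plugin_gen_empty_callback() and plugin_gen_inject() below):
 *
 *   PLUGIN_GEN_FROM_TB:    CB_UDATA, CB_INLINE
 *   PLUGIN_GEN_FROM_INSN:  ENABLE_MEM_HELPER, CB_UDATA, CB_INLINE
 *   PLUGIN_GEN_FROM_MEM:   CB_MEM, CB_INLINE
 *   PLUGIN_GEN_AFTER_INSN: DISABLE_MEM_HELPER
 */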

/*
 * These helpers are stubs that get dynamically switched out for direct
 * calls to the plugin's callbacks once a plugin subscribes to them.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}
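
/*
 * The ops emitted above are the template that append_udata_cb() below
 * matches when filling in a callback: a constant pointer (the userdata
 * placeholder), the cpu_index load, and the call to the empty helper.
 */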

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
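
/*
 * The ops emitted above are the template matched by append_inline_cb():
 * constant pointer, ld_i64, add_i64, st_i64, i.e. a read-modify-write of
 * a 64-bit counter at a plugin-supplied address.
 */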

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}
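
/*
 * This template is matched by inject_mem_helper(), which rewrites the
 * NULL constant (to a pointer to the copied callback descriptors when
 * enabling, or leaves it as NULL when disabling) before the store to
 * CPUState.plugin_mem_cbs.
 */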

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction.
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
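
/*
 * Note that each guest memory access gets two separate empty regions,
 * one for regular (helper-based) callbacks and one for inline ops, so
 * that each kind can be filled in or removed independently.
 */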

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}
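
/*
 * copy_op_nocheck() advances *begin_op to the next op of the empty
 * (template) sequence and inserts a copy of it after @op in the output
 * stream; the template is thus walked and duplicated one op at a time.
 */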

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the call op */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1];

    return op;
}
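
/*
 * Note that @cb_idx is cached across calls: the args[] scan for
 * @empty_func only runs for an event's first callback, and later
 * callbacks reuse the index. Callers also use (*cb_idx == -1) to detect
 * the first callback and copy the cpu_index load just once per event,
 * which is the optimization described at the top of this file.
 */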

/*
 * The code below that appends/replaces ops is sensitive to changes in
 * the patterns of TCGOps generated by the tcg_gen_FOO calls that emitted
 * the empty callbacks. A mismatch will assert very quickly in a debug
 * build, since we assert that the ops we are replacing are the expected
 * ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of them so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}
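
/*
 * Note: qemu_plugin_add_dyn_cb_arr() above registers @arr, keeping track
 * of the copied descriptors so that they can be freed later on, per
 * point (1) in the big comment preceding inject_mem_enable_helper().
 */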

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
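
/*
 * With DEBUG_PLUGIN_GEN_OPS defined, pr_ops() prints one line per op;
 * the output looks roughly like this (indices and opcodes depend on the
 * guest code being translated):
 *
 *   op[ 0]: insn_start
 *   op[ 1]: plugin_cb_start insn enable mem helper
 *   ...
 *   op[ 7]: plugin_cb_end
 */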

static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}
936