/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * translated guest code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"

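/*
 * A qemu_plugin_cb records one plugin's callback for one subscribable event
 * (vcpu init/exit, tb translation, syscalls, flush, atexit, ...). It lives on
 * the per-event RCU list in the global state below and keeps a back pointer
 * to the plugin context that registered it.
 */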
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

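/* The single global plugin state, shared by all plugins and all vCPUs. */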
struct qemu_plugin_state plugin;

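/*
 * Map a plugin id back to its context. Callers must hold plugin.lock;
 * looking up an id we never handed out is a fatal error.
 */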
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    if (id_p == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    return ctx;
}

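/*
 * Async work item run on the vCPU itself: latch the new event mask into the
 * vCPU's plugin state and flush its jump cache so stale translations are not
 * reused once the instrumentation requirements have changed.
 */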
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

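/*
 * g_hash_table_foreach() helper. The key is a pointer to the cpu_index field
 * inside a CPUState, from which we recover the CPUState itself. Realized
 * vCPUs get the mask update queued as async work; unrealized ones can be
 * updated directly.
 */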
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

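/*
 * Drop @ctx's callback for @ev. If no plugin is left subscribed to @ev,
 * clear the event's bit in the global mask and push the new mask out to
 * every vCPU.
 */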
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

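/*
 * Common implementation for plugin_register_cb() and
 * plugin_register_cb_udata(): install, update or (when @func is NULL) remove
 * the callback for @ev on behalf of plugin @id. The first registration for
 * an event also sets the event's bit in the global mask and propagates it to
 * all vCPUs.
 */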
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}

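/*
 * Called when a new vCPU comes up: sync its event mask, add it to cpu_ht so
 * later mask updates reach it, then run any registered VCPU_INIT callbacks.
 */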
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

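/*
 * Called when a vCPU is torn down: run VCPU_EXIT callbacks while the vCPU is
 * still known, then drop it from cpu_ht.
 */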
void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

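/*
 * Run @cb once for every vCPU currently known to the plugin core, on behalf
 * of plugin @id. plugin.lock is held across the walk so vCPUs cannot come or
 * go while we iterate.
 */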
struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

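/*
 * Record an inline operation for an instruction, TB or memory access.
 * Inline ops are applied directly when the instrumented code runs, without
 * calling back into the plugin; the only operation currently implemented is
 * QEMU_PLUGIN_INLINE_ADD_U64 (see exec_inline_op() below).
 */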
void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

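/*
 * Record a memory access callback. @rw selects whether the plugin wants to
 * see reads, writes or both; the check against the actual access happens in
 * qemu_plugin_vcpu_mem_cb() below.
 */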
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

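/*
 * Called when the translation cache is flushed: every TB that referenced the
 * dynamic callback arrays is gone, so the arrays can be freed before the
 * FLUSH callbacks tell plugins to reset their own state.
 */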
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

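/*
 * Apply an inline operation: the plugin handed us a pointer to a u64 when it
 * registered the op, and for ADD_U64 we add the immediate to it.
 */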
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

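/*
 * Called from the memory helpers for each instrumented guest access: walk
 * the callback array attached to the currently executing instruction and
 * dispatch its entries, either by calling back into the plugin or by
 * applying the inline op.
 */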
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

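/* dyn_cb_arr_ht is keyed on the array pointer itself: compare by identity */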
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

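/*
 * Constructor: initialise the global plugin state before main() runs, since
 * plugin installation is driven from command line handling.
 */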
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}

int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}