/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
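
/*
 * Illustrative sketch (not part of this file): the plugin-side half of the
 * simple callbacks dispatched by plugin_vcpu_cb__simple() above. The
 * my_vcpu_init name is hypothetical; the registration call and the
 * qemu_plugin_vcpu_simple_cb_t signature are the public API from
 * include/qemu/qemu-plugin.h.
 *
 *     static void my_vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
 *     {
 *         printf("vcpu %u initialized\n", vcpu_index);
 *     }
 *
 *     // typically called from the plugin's qemu_plugin_install()
 *     qemu_plugin_register_vcpu_init_cb(id, my_vcpu_init);
 */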

static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}
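
/*
 * Illustrative sketch (not part of this file): one way a plugin reaches
 * plugin_register_inline_op() below. An inline ADD_U64 op bumps a
 * plugin-owned counter at TB execution time without the cost of a helper
 * call. insn_count and my_tb_trans are hypothetical; the API calls are
 * from include/qemu/qemu-plugin.h.
 *
 *     static uint64_t insn_count;
 *
 *     static void my_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         qemu_plugin_register_vcpu_tb_exec_inline(
 *             tb, QEMU_PLUGIN_INLINE_ADD_U64,
 *             &insn_count, qemu_plugin_tb_n_insns(tb));
 *     }
 */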

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0;
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

inline void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}
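
/*
 * Illustrative sketch (not part of this file): a plugin wiring up the
 * idle/resume hooks registered above from its entry point. my_vcpu_idle
 * is hypothetical; qemu_plugin_install() and QEMU_PLUGIN_EXPORT are the
 * documented plugin entry-point conventions from include/qemu/qemu-plugin.h.
 *
 *     static void my_vcpu_idle(qemu_plugin_id_t id, unsigned int vcpu_index)
 *     {
 *         printf("vcpu %u going idle\n", vcpu_index);
 *     }
 *
 *     QEMU_PLUGIN_EXPORT
 *     int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                             int argc, char **argv)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, my_vcpu_idle);
 *         return 0;
 *     }
 */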

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        if (!(w & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}
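
/*
 * Illustrative sketch (not part of this file): the plugin-side counterpart
 * of qemu_plugin_vcpu_mem_cb() above. A plugin registers a per-instruction
 * memory callback at translation time; store_count and my_mem_cb are
 * hypothetical, while the registration call and the helper predicate are
 * from include/qemu/qemu-plugin.h.
 *
 *     static uint64_t store_count; // updated without atomics; sketch only
 *
 *     static void my_mem_cb(unsigned int vcpu_index,
 *                           qemu_plugin_meminfo_t info,
 *                           uint64_t vaddr, void *udata)
 *     {
 *         if (qemu_plugin_mem_is_store(info)) {
 *             store_count++;
 *         }
 *     }
 *
 *     // inside a tb_trans callback, for each instruction insn:
 *     qemu_plugin_register_vcpu_mem_cb(insn, my_mem_cb,
 *                                      QEMU_PLUGIN_CB_NO_REGS,
 *                                      QEMU_PLUGIN_MEM_RW, NULL);
 */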