/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * translated code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"
#include "qemu/compiler.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

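/*
 * The two dispatchers below follow the same pattern for events that are not
 * tied to a particular vCPU: FLUSH takes no user data, ATEXIT does.
 */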
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

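/*
 * Invoke @cb once for every vCPU currently tracked in plugin.cpu_ht,
 * holding plugin.lock across the iteration.
 */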
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0;
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

inline void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

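/* The return-path counterpart of qemu_plugin_vcpu_syscall() above. */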
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        if (!(w & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

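/* Run the ATEXIT callbacks; installed via atexit() in plugin_init() below. */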
void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that
 * the last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}