/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into
 * the translated code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    qemu_plugin_id_t *id_p;

    /*
     * Check the lookup result before container_of(): since 'id' is not
     * the first member of the context, container_of(NULL, ...) would
     * yield a non-NULL pointer and the error path would never trigger.
     */
    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    if (id_p == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return container_of(id_p, struct qemu_plugin_ctx, id);
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
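
/*
 * For reference, a plugin-side handler dispatched by the loop above has
 * the qemu_plugin_vcpu_simple_cb_t shape. A minimal sketch (hypothetical
 * names, not part of this file):
 *
 *   static void my_vcpu_idle(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       g_autofree gchar *msg = g_strdup_printf("vcpu %u idle\n", vcpu_index);
 *       qemu_plugin_outs(msg);
 *   }
 */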

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}

static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
        return;
    }

    bool need_realloc = false;
    while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
        plugin.scoreboard_alloc_size *= 2;
        need_realloc = true;
    }

    if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
        /* nothing to do, we just updated sizes for future scoreboards */
        return;
    }

    /* cpus must be stopped, as a tb might still use an existing scoreboard */
    start_exclusive();
    struct qemu_plugin_scoreboard *score;
    QLIST_FOREACH(score, &plugin.scoreboards, entry) {
        g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    }
    /* force all tb to be flushed, as scoreboard pointers were changed */
    tb_flush(cpu);
    end_exclusive();
}
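
/*
 * Worked example of the doubling policy above (illustrative only): with
 * the initial scoreboard_alloc_size of 16, a vCPU with cpu_index 40
 * doubles the size twice (16 -> 32 -> 64); every live scoreboard is then
 * resized to 64 slots inside the exclusive section, and all TBs are
 * flushed because the backing data pointers may have moved.
 */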

static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused)
{
    bool success;

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    /* Plugin initialization must wait until the cpu starts executing code */
    async_run_on_cpu(cpu, qemu_plugin_vcpu_init__async, RUN_ON_CPU_NULL);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, true,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
{
    switch (op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        return PLUGIN_CB_INLINE_ADD_U64;
    case QEMU_PLUGIN_INLINE_STORE_U64:
        return PLUGIN_CB_INLINE_STORE_U64;
    default:
        g_assert_not_reached();
    }
}

void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    struct qemu_plugin_inline_cb inline_cb = { .rw = rw,
                                               .entry = entry,
                                               .imm = imm };
    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->type = op_to_cb_type(op);
    dyn_cb->inline_insn = inline_cb;
}
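
/*
 * For context, these records are created when a plugin requests an
 * inline operation. A minimal plugin-side sketch, assuming the public
 * scoreboard API and hypothetical variable names:
 *
 *   struct qemu_plugin_scoreboard *counts =
 *       qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *   qemu_plugin_u64 insn_count = qemu_plugin_scoreboard_u64(counts);
 *   qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
 *       insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
 */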

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .f.vcpu_udata = cb,
                                                 .userp = udata,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->regular = regular_cb;
}

void plugin_register_dyn_cond_cb__udata(GArray **arr,
                                        qemu_plugin_vcpu_udata_cb_t cb,
                                        enum qemu_plugin_cb_flags flags,
                                        enum qemu_plugin_cond cond,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm,
                                        void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_conditional_cb cond_cb = { .userp = udata,
                                                  .f.vcpu_udata = cb,
                                                  .cond = cond,
                                                  .entry = entry,
                                                  .imm = imm,
                                                  .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_COND;
    dyn_cb->cond = cond_cb;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    /*
     * Expect that the underlying type for enum qemu_plugin_meminfo_t
     * is either int32_t or uint32_t, aka int or unsigned int.
     */
    QEMU_BUILD_BUG_ON(
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));

    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_mem_cb_t:
         *   void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
         */
        [0 ... 2].typemask =
            (dh_typemask(void, 0) |
             dh_typemask(i32, 1) |
             (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
              ? dh_typemask(i32, 2) : dh_typemask(s32, 2)) |
             dh_typemask(i64, 3) |
             dh_typemask(ptr, 4))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .userp = udata,
                                                 .rw = rw,
                                                 .f.vcpu_mem = cb,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
    dyn_cb->regular = regular_cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_state->event_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}
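
/*
 * A typical translation-time callback invoked by the loop above walks
 * the TB and hooks individual instructions. A hedged sketch
 * (my_tb_trans and my_insn_exec are hypothetical names):
 *
 *   static void my_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           qemu_plugin_register_vcpu_insn_exec_cb(
 *               insn, my_insn_exec, QEMU_PLUGIN_CB_NO_REGS, NULL);
 *       }
 *   }
 */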

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(enum plugin_dyn_cb_type type,
                    struct qemu_plugin_inline_cb *cb,
                    int cpu_index)
{
    char *ptr = cb->entry.score->data->data;
    size_t elem_size = g_array_get_element_size(cb->entry.score->data);
    size_t offset = cb->entry.offset;
    uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);

    switch (type) {
    case PLUGIN_CB_INLINE_ADD_U64:
        *val += cb->imm;
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        *val = cb->imm;
        break;
    default:
        g_assert_not_reached();
    }
}
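
/*
 * Worked example of the addressing above: a scoreboard whose element is
 * a 16-byte struct with a u64 member at offset 8 gives, for cpu_index 3,
 * val = data + 8 + 3 * 16. Inline TCG ops emitted at translation time
 * compute the same per-vCPU address, so this helper and the generated
 * code stay in agreement.
 */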

void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->neg.plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            if (rw & cb->regular.rw) {
                cb->regular.f.vcpu_mem(cpu->cpu_index,
                                       make_plugin_meminfo(oi, rw),
                                       vaddr, cb->regular.userp);
            }
            break;
        case PLUGIN_CB_INLINE_ADD_U64:
        case PLUGIN_CB_INLINE_STORE_U64:
            if (rw & cb->inline_insn.rw) {
                exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}
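
/*
 * Expected call sequence around fork() in *-user mode (a sketch; the
 * actual call sites live in the per-emulator fork handling):
 *
 *   qemu_plugin_user_prefork_lock();       parent, before fork()
 *   pid = fork();
 *   qemu_plugin_user_postfork(pid == 0);   both sides, after fork()
 *
 * The child re-initialises plugin.lock rather than unlocking it, as a
 * recursive mutex held across fork() is not reliably unlockable in the
 * child process.
 */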

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}

int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}

struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    struct qemu_plugin_scoreboard *score =
        g_malloc0(sizeof(struct qemu_plugin_scoreboard));
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);

    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    return score;
}

void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}
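
/*
 * Scoreboard lifecycle from the plugin-facing side, for reference: a
 * hedged sketch using the public wrappers (local names are
 * hypothetical):
 *
 *   struct qemu_plugin_scoreboard *sb =
 *       qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *   uint64_t *slot = qemu_plugin_scoreboard_find(sb, vcpu_index);
 *   *slot += 1;
 *   ...
 *   qemu_plugin_scoreboard_free(sb);
 */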