/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * guest code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
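
/*
 * Usage sketch (illustrative, not part of this file): the dispatcher above
 * fans out to callbacks that a plugin registers through the public API.
 * Assuming a hypothetical plugin that counts idle events:
 *
 *     static uint64_t idle_count;
 *
 *     static void vcpu_idle(qemu_plugin_id_t id, unsigned int vcpu_index)
 *     {
 *         idle_count++;
 *     }
 *
 *     QEMU_PLUGIN_EXPORT
 *     int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                             int argc, char **argv)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);
 *         return 0;
 *     }
 *
 * (A real plugin also exports qemu_plugin_version.) vcpu_idle() then runs
 * via plugin_vcpu_cb__simple() each time a vCPU goes idle.
 */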

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}

static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
        return;
    }

    bool need_realloc = false;
    while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
        plugin.scoreboard_alloc_size *= 2;
        need_realloc = true;
    }

    if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
        /* nothing to do, we just updated sizes for future scoreboards */
        return;
    }

    /* cpus must be stopped, as a tb might still use an existing scoreboard. */
    start_exclusive();
    struct qemu_plugin_scoreboard *score;
    QLIST_FOREACH(score, &plugin.scoreboards, entry) {
        g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    }
    /* force all tb to be flushed, as scoreboard pointers were changed. */
    tb_flush(cpu);
    end_exclusive();
}
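
/*
 * Worked example (illustrative): plugin_init() below starts
 * scoreboard_alloc_size at 16 entries. If a vCPU with cpu_index 70 comes up,
 * the loop above doubles 16 -> 32 -> 64 -> 128, every live scoreboard is
 * resized to 128 entries, and all TBs are flushed, because generated code
 * may still hold pointers into the old (reallocated) arrays.
 */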

void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, true,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
{
    switch (op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        return PLUGIN_CB_INLINE_ADD_U64;
    case QEMU_PLUGIN_INLINE_STORE_U64:
        return PLUGIN_CB_INLINE_STORE_U64;
    default:
        g_assert_not_reached();
    }
}

void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    struct qemu_plugin_inline_cb inline_cb = { .rw = rw,
                                               .entry = entry,
                                               .imm = imm };
    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->type = op_to_cb_type(op);
    dyn_cb->inline_insn = inline_cb;
}

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .f.vcpu_udata = cb,
                                                 .userp = udata,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->regular = regular_cb;
}
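
/*
 * Usage sketch (illustrative, not part of this file): the dynamic callback
 * arrays filled in above back the per-instruction plugin API. A hypothetical
 * translation-time hook that registers an execution callback for every
 * instruction in a TB:
 *
 *     static void insn_exec(unsigned int vcpu_index, void *udata)
 *     {
 *     }
 *
 *     static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         size_t n = qemu_plugin_tb_n_insns(tb);
 *         for (size_t i = 0; i < n; i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *             qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
 *                                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                                    NULL);
 *         }
 *     }
 *
 * insn_exec() is then dispatched through the qemu_plugin_dyn_cb record that
 * plugin_register_dyn_cb__udata() appends for it.
 */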

void plugin_register_dyn_cond_cb__udata(GArray **arr,
                                        qemu_plugin_vcpu_udata_cb_t cb,
                                        enum qemu_plugin_cb_flags flags,
                                        enum qemu_plugin_cond cond,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm,
                                        void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_conditional_cb cond_cb = { .userp = udata,
                                                  .f.vcpu_udata = cb,
                                                  .cond = cond,
                                                  .entry = entry,
                                                  .imm = imm,
                                                  .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_COND;
    dyn_cb->cond = cond_cb;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    /*
     * Expect that the underlying type for enum qemu_plugin_meminfo_t
     * is either int32_t or uint32_t, aka int or unsigned int.
     */
    QEMU_BUILD_BUG_ON(
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));

    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_mem_cb_t:
         *   void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
         */
        [0 ... 2].typemask =
            (dh_typemask(void, 0) |
             dh_typemask(i32, 1) |
             (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
              ? dh_typemask(i32, 2) : dh_typemask(s32, 2)) |
             dh_typemask(i64, 3) |
             dh_typemask(ptr, 4))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .userp = udata,
                                                 .rw = rw,
                                                 .f.vcpu_mem = cb,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
    dyn_cb->regular = regular_cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_state->event_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}
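
/*
 * Usage sketch (illustrative, not part of this file): the hook dispatched
 * above is what a plugin installs with
 *
 *     qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
 *
 * where tb_trans has the qemu_plugin_vcpu_tb_trans_cb_t signature used in
 * the sketch after plugin_register_dyn_cb__udata() above.
 */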

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(enum plugin_dyn_cb_type type,
                    struct qemu_plugin_inline_cb *cb,
                    int cpu_index)
{
    char *ptr = cb->entry.score->data->data;
    size_t elem_size = g_array_get_element_size(
        cb->entry.score->data);
    size_t offset = cb->entry.offset;
    uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);

    switch (type) {
    case PLUGIN_CB_INLINE_ADD_U64:
        *val += cb->imm;
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        *val = cb->imm;
        break;
    default:
        g_assert_not_reached();
    }
}

void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->neg.plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            /* bitwise test: fire only if the access kind is subscribed */
            if (rw & cb->regular.rw) {
                cb->regular.f.vcpu_mem(cpu->cpu_index,
                                       make_plugin_meminfo(oi, rw),
                                       vaddr, cb->regular.userp);
            }
            break;
        case PLUGIN_CB_INLINE_ADD_U64:
        case PLUGIN_CB_INLINE_STORE_U64:
            if (rw & cb->inline_insn.rw) {
                exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
}
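
/*
 * Usage sketch (illustrative, not part of this file): a plugin-side memory
 * callback matching qemu_plugin_vcpu_mem_cb_t, with hypothetical counters
 * `reads` and `writes`:
 *
 *     static void vcpu_mem(unsigned int vcpu_index,
 *                          qemu_plugin_meminfo_t info,
 *                          uint64_t vaddr, void *udata)
 *     {
 *         if (qemu_plugin_mem_is_store(info)) {
 *             writes++;
 *         } else {
 *             reads++;
 *         }
 *     }
 *
 * It would be registered per instruction at translation time with
 * qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, QEMU_PLUGIN_CB_NO_REGS,
 * QEMU_PLUGIN_MEM_RW, NULL) and reached here as PLUGIN_CB_MEM_REGULAR.
 */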

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}
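
/*
 * Usage sketch (illustrative, not part of this file): the expected pairing
 * on the *-user caller's side is
 *
 *     qemu_plugin_user_prefork_lock();
 *     pid = fork();
 *     qemu_plugin_user_postfork(pid == 0);
 *
 * The child re-initializes plugin.lock rather than unlocking it, since a
 * mutex held across fork() is not guaranteed to be usable in the child.
 */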

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}

int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}

struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    struct qemu_plugin_scoreboard *score =
        g_malloc0(sizeof(struct qemu_plugin_scoreboard));
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);

    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    return score;
}

void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}
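
/*
 * Usage sketch (illustrative, not part of this file): scoreboards allocated
 * above are exposed to plugins through the public wrappers. A minimal
 * plugin-side sketch, assuming a hypothetical per-vCPU counter:
 *
 *     static struct qemu_plugin_scoreboard *counts;
 *     static qemu_plugin_u64 insn_count;
 *
 *     counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *     insn_count = qemu_plugin_scoreboard_u64(counts);
 *
 * Each vCPU then updates its own slot (e.g. via inline ops registered with
 * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu()), with growth and
 * TB flushing handled transparently by plugin_grow_scoreboards__locked().
 */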