/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

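/*
 * Illustrative sketch (not part of this file): the cb_lists above are
 * normally populated from plugin code via the public API declared in
 * qemu-plugin.h.  A minimal plugin registering a vcpu-init callback might
 * look roughly like this (treat the snippet as an assumed usage pattern,
 * not as reference code):
 *
 *   static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       qemu_plugin_outs("vcpu came up\n");
 *   }
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                              const qemu_info_t *info,
 *                                              int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
 *       return 0;
 *   }
 *
 * The registration funnels into do_plugin_register_cb() below, which links
 * the callback into plugin.cb_lists[QEMU_PLUGIN_EV_VCPU_INIT] and sets the
 * matching bit in the event mask.
 */
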
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}

static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
        return;
    }

    bool need_realloc = false;
    while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
        plugin.scoreboard_alloc_size *= 2;
        need_realloc = true;
    }

    if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
        /* nothing to do, we just updated sizes for future scoreboards */
        return;
    }

    /* cpus must be stopped, as tb might still use an existing scoreboard. */
    start_exclusive();
    struct qemu_plugin_scoreboard *score;
    QLIST_FOREACH(score, &plugin.scoreboards, entry) {
        g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    }
    /* force all tb to be flushed, as scoreboard pointers were changed. */
    tb_flush(cpu);
    end_exclusive();
}

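/*
 * Worked example (assuming the initial scoreboard_alloc_size of 16 set in
 * plugin_init() below): if a vcpu with cpu_index == 40 comes online, the
 * loop above doubles the allocation size 16 -> 32 -> 64, every existing
 * scoreboard GArray is then resized to 64 elements under start_exclusive(),
 * and all TBs are flushed because g_array_set_size() may have moved the
 * buffers that generated code still points at.
 */
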
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, true,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
{
    switch (op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        return PLUGIN_CB_INLINE_ADD_U64;
    case QEMU_PLUGIN_INLINE_STORE_U64:
        return PLUGIN_CB_INLINE_STORE_U64;
    default:
        g_assert_not_reached();
    }
}

void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    struct qemu_plugin_inline_cb inline_cb = { .rw = rw,
                                               .entry = entry,
                                               .imm = imm };
    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->type = op_to_cb_type(op);
    dyn_cb->inline_insn = inline_cb;
}

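/*
 * Illustrative sketch (not part of this file): inline ops are normally
 * attached from a plugin's tb_trans callback through the public API, e.g.
 * counting executed TBs per vcpu with a scoreboard.  Names come from
 * qemu-plugin.h; treat this as an assumed usage pattern, not reference code:
 *
 *   static struct qemu_plugin_scoreboard *counts;
 *   // in qemu_plugin_install():
 *   //     counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *
 *   static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       qemu_plugin_u64 n = qemu_plugin_scoreboard_u64(counts);
 *       qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
 *           tb, QEMU_PLUGIN_INLINE_ADD_U64, n, 1);
 *   }
 *
 * Such a call ends up in plugin_register_inline_op_on_entry() above with
 * op == QEMU_PLUGIN_INLINE_ADD_U64 and imm == 1.
 */
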
void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         * void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .f.vcpu_udata = cb,
                                                 .userp = udata,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->regular = regular_cb;
}

void plugin_register_dyn_cond_cb__udata(GArray **arr,
                                        qemu_plugin_vcpu_udata_cb_t cb,
                                        enum qemu_plugin_cb_flags flags,
                                        enum qemu_plugin_cond cond,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm,
                                        void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         * void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_conditional_cb cond_cb = { .userp = udata,
                                                  .f.vcpu_udata = cb,
                                                  .cond = cond,
                                                  .entry = entry,
                                                  .imm = imm,
                                                  .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_COND;
    dyn_cb->cond = cond_cb;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    /*
     * Expect that the underlying type for enum qemu_plugin_meminfo_t
     * is either int32_t or uint32_t, aka int or unsigned int.
     */
    QEMU_BUILD_BUG_ON(
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));

    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_mem_cb_t:
         * void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
         */
        [0 ... 2].typemask =
            (dh_typemask(void, 0) |
             dh_typemask(i32, 1) |
             (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
              ? dh_typemask(i32, 2) : dh_typemask(s32, 2)) |
             dh_typemask(i64, 3) |
             dh_typemask(ptr, 4))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .userp = udata,
                                                 .rw = rw,
                                                 .f.vcpu_mem = cb,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
    dyn_cb->regular = regular_cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_state->event_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

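/*
 * Illustrative sketch (not part of this file): a tb_trans callback on the
 * plugin side typically walks the instructions of the block and hooks the
 * ones it cares about.  Names are from qemu-plugin.h; the snippet is an
 * assumed usage pattern, not a reference implementation:
 *
 *   static void insn_exec(unsigned int vcpu_index, void *udata)
 *   {
 *       // runs every time the instrumented instruction executes
 *   }
 *
 *   static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
 *                                                  QEMU_PLUGIN_CB_NO_REGS,
 *                                                  NULL);
 *       }
 *   }
 *
 * qemu_plugin_tb_trans_cb() above is what fans the translation event out to
 * each registered tb_trans function.
 */
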
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(enum plugin_dyn_cb_type type,
                    struct qemu_plugin_inline_cb *cb,
                    int cpu_index)
{
    char *ptr = cb->entry.score->data->data;
    size_t elem_size = g_array_get_element_size(
        cb->entry.score->data);
    size_t offset = cb->entry.offset;
    uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);

    switch (type) {
    case PLUGIN_CB_INLINE_ADD_U64:
        *val += cb->imm;
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        *val = cb->imm;
        break;
    default:
        g_assert_not_reached();
    }
}

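/*
 * Worked example for the address computation above (numbers illustrative):
 * with a scoreboard element size of 16 bytes and a qemu_plugin_u64 located
 * at offset 8 inside each per-vcpu element, cpu_index == 3 resolves to
 *   data + 8 + 3 * 16 == data + 56,
 * i.e. one fixed-size slot per vcpu with the entry's offset applied inside
 * the slot.  In this file exec_inline_op() is only reached from
 * qemu_plugin_vcpu_mem_cb() below; the common TB/insn-entry case is expected
 * to be emitted directly as TCG ops using the same layout.
 */
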
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->neg.plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            if (rw && cb->regular.rw) {
                cb->regular.f.vcpu_mem(cpu->cpu_index,
                                       make_plugin_meminfo(oi, rw),
                                       vaddr, cb->regular.userp);
            }
            break;
        case PLUGIN_CB_INLINE_ADD_U64:
        case PLUGIN_CB_INLINE_STORE_U64:
            if (rw && cb->inline_insn.rw) {
                exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

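/*
 * Illustrative call sequence (assumed, based on how the *-user fork hooks
 * are typically wired up elsewhere in the tree; not code from this file):
 *
 *   qemu_plugin_user_prefork_lock();       // parent, before fork()
 *   pid_t pid = fork();
 *   if (pid == 0) {
 *       qemu_plugin_user_postfork(true);   // child: re-init plugin.lock
 *   } else {
 *       qemu_plugin_user_postfork(false);  // parent: drop plugin.lock
 *   }
 *
 * Re-initialising the recursive mutex in the child avoids inheriting a lock
 * whose owning thread no longer exists after fork().
 */
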
void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}

int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}

struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    struct qemu_plugin_scoreboard *score =
        g_malloc0(sizeof(struct qemu_plugin_scoreboard));
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);

    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    return score;
}

void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}