/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
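
/*
 * Illustrative sketch (not compiled; opcode names are indicative only): an
 * empty instruction ("udata") event is emitted at translation time roughly as
 *
 *     plugin_cb_start insn udata
 *     mov_ptr   tmp_udata, $0x0
 *     ld_i32    tmp_cpu_index, env, $<cpu_index offset>
 *     call      plugin_vcpu_udata_cb(tmp_cpu_index, tmp_udata)
 *     plugin_cb_end
 *
 * If two plugins then request an instruction-exec callback, plugin_gen_inject()
 * copies the mov/ld/call template once per callback, patching the constant
 * userdata pointer and the call target (see copy_call()), reusing the single
 * cpu_index load for the second copy, and finally removes the empty template.
 */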
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/helper-proto-common.h"

#define HELPER_H "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
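
/*
 * Illustrative note on the inline template above (how it is patched later is
 * described in append_inline_cb()): the empty sequence is a load/add/store on
 * a dummy address with a dummy immediate, i.e. roughly
 *
 *     movi_ptr  ptr, $0x0             <- patched to the plugin's u64 (cb->userp)
 *     ld_i64    val, ptr, $0x0
 *     addi_i64  val, val, $0xdeadface <- patched to cb->inline_insn.imm
 *     st_i64    val, ptr, $0x0
 *
 * This is why only an add-to-u64 inline operation can be expressed today: the
 * template hard-codes one load, one add-immediate and one store.
 */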
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_i32(meminfo, info);
    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}
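
/*
 * Illustrative sketch of the markers emitted for a single instruction
 * (PLUGIN_GEN_FROM_INSN), in order:
 *
 *     plugin_cb_start insn enable_mem_helper ... plugin_cb_end
 *     plugin_cb_start insn udata             ... plugin_cb_end
 *     plugin_cb_start insn inline            ... plugin_cb_end
 *     <guest instruction translation, including any FROM_MEM callbacks>
 *     plugin_cb_start after_insn disable_mem_helper ... plugin_cb_end
 *
 * The ENABLE_MEM_HELPER block coming first is what plugin_gen_inject() relies
 * on (see the note above). This layout is a reading of this file, not a
 * normative description of the op stream.
 */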
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
    gen_empty_mem_cb(addr, info);
    tcg_gen_plugin_cb_end();

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
    gen_empty_inline_cb();
    tcg_gen_plugin_cb_end();
}

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}

/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
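/*
 * For example (illustrative, derived from gen_empty_udata_cb() above),
 * append_udata_cb() expects the empty template to be exactly
 *
 *     mov_i{32,64}   (the NULL userdata constant)
 *     ld_i32         (the cpu_index load)
 *     call           (to the empty helper)
 *
 * so a change to the tcg_gen_* sequence in gen_empty_udata_cb() must be
 * mirrored in append_udata_cb(), and vice versa; otherwise the copy_op()
 * opcode checks (tcg_debug_assert) fire immediately in a debug build.
 */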
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* call */
    op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}
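
/*
 * Filter used for memory callbacks: args[2] of the plugin_cb_start op is the
 * "write" flag (see the args[] layout comment near the top of this file), so
 * "w + 1" maps a read (0) to QEMU_PLUGIN_MEM_R and a write (1) to
 * QEMU_PLUGIN_MEM_W, which is then matched against the plugin's requested
 * cb->rw bitmask. The enum-value correspondence is an assumption based on the
 * public plugin API; the bit test itself is what op_rw() below implements.
 */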
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of it so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
                                     struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    ptb->mem_helper = true;

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    /*
     * We could emit the clearing unconditionally and be done. However, this can
     * be wasteful if for instance plugins don't track memory accesses, or if
     * most TBs don't use helpers. Instead, emit the clearing iff the TB calls
     * helpers that might access guest memory.
     *
     * Note: we do not reset plugin_tb->mem_helper here; a TB might have several
     * exit points, and we want to emit the clearing from all of them.
     */
    if (!tcg_ctx->plugin_tb->mem_helper) {
        return;
    }
    tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}
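
/*
 * Putting the pieces above together (an informal reading, not extra
 * semantics): for an instruction that calls helpers and has memory callbacks,
 * the injected code effectively does
 *
 *     cpu->plugin_mem_cbs = <copied descriptor array>;   (ENABLE_MEM_HELPER)
 *     <instruction body; helpers reach qemu_plugin_vcpu_mem_cb at run time>
 *     cpu->plugin_mem_cbs = NULL;                        (DISABLE_MEM_HELPER)
 *
 * with plugin_gen_disable_mem_helpers() emitting the same NULL store at every
 * TB exit, in case the per-instruction clearing is unreachable.
 */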
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(ptb, insn, begin_op);
}

static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
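
/*
 * Defining DEBUG_PLUGIN_GEN_OPS above makes plugin_gen_inject() dump the op
 * stream before and after injection. A hypothetical excerpt (format only; the
 * indices and surrounding ops depend on the guest instruction) looks like:
 *
 *     op[ 4]: plugin_cb_start insn enable mem helper
 *     op[ 5]: mov_i64
 *     ...
 *     op[ 9]: plugin_cb_end
 */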
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}
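
/*
 * plugin_gen_tb_start() above and the entry points below are driven by the
 * translator loop (see exec/translator.c): plugin_gen_tb_start() runs before
 * the first guest instruction is decoded, plugin_gen_insn_start() and
 * plugin_gen_insn_end() bracket each instruction, and plugin_gen_tb_end()
 * runs once translation has finished, collecting the plugins' requests and
 * invoking plugin_gen_inject() to rewrite the op stream. This ordering is a
 * summary of the callers, not a guarantee made by this file.
 */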
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here and make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}