/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif
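
/*
 * Illustration of the scheme above (a rough sketch only: exact opcodes and
 * temps depend on the host and guest word size). The empty per-instruction
 * callback emitted by gen_empty_udata_cb() looks roughly like:
 *
 *   plugin_cb_start  PLUGIN_GEN_FROM_INSN, PLUGIN_GEN_CB_UDATA, 0
 *   mov_i64   tmp0, $0x0                  <- NULL userdata, patched later
 *   ld_i32    tmp1, env, $cpu_index_offset
 *   call      plugin_vcpu_udata_cb, tmp1, tmp0
 *   plugin_cb_end
 *
 * plugin_gen_inject() then emits one patched copy of the mov/ld/call per
 * registered callback (reusing the cpu_index load after the first copy) and
 * finally removes the empty template.
 */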

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
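
/*
 * Conceptually, the empty inline sequence above is "*(uint64_t *)ptr += imm",
 * with ptr and imm patched in later by append_inline_cb(). This is the kind
 * of operation a plugin requests with, e.g. (illustrative; see qemu-plugin.h):
 *   qemu_plugin_register_vcpu_insn_exec_inline(insn,
 *                                               QEMU_PLUGIN_INLINE_ADD_U64,
 *                                               &counter, 1);
 */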

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}
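
/*
 * The copy_* helpers below replicate, opcode by opcode, the sequences emitted
 * by the tcg_gen_* calls in the empty callbacks, picking the right variant
 * for the host register width (and the guest word size where relevant). Each
 * one advances *begin_op through the template ops while appending the copies
 * after @op.
 */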

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}
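
/*
 * Note: for a call op the function pointer sits at
 * args[TCGOP_CALLO(op) + TCGOP_CALLI(op)], i.e. right after the output and
 * input arguments, which is why func_idx is computed that way above. The
 * index is also reported back through *cb_idx so that subsequent copies for
 * the same event can skip re-copying the cpu_index load (see
 * append_udata_cb() below).
 */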

/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    /*
     * args[2] is 1 for a write access and 0 for a read (see the
     * plugin_cb_start comment above), so w + 1 maps to QEMU_PLUGIN_MEM_R
     * or QEMU_PLUGIN_MEM_W respectively.
     */
    return !!(cb->rw & (w + 1));
}

static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of them so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
                                     struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    ptb->mem_helper = true;

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    /*
     * We could emit the clearing unconditionally and be done. However,
     * this can be wasteful if for instance plugins don't track memory
     * accesses, or if most TBs don't use helpers. Instead, emit the
     * clearing iff the TB calls helpers that might access guest memory.
     *
     * Note: we do not reset plugin_tb->mem_helper here; a TB might have
     * several exit points, and we want to emit the clearing from all of them.
     */
    if (!tcg_ctx->plugin_tb->mem_helper) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(ptb, insn, begin_op);
}

static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
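
/*
 * Single pass over the TB's op list: insn_start ops advance insn_idx so that
 * we know which instruction a marker belongs to, and every plugin_cb_start
 * marker is dispatched on its (from, type) arguments to the routine that
 * either fills it in with the registered callbacks or removes it.
 */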
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}
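
/*
 * plugin_gen_insn_start() and plugin_gen_insn_end() are called once per guest
 * instruction from the generic translator loop, bracketing each instruction
 * between the FROM_INSN and AFTER_INSN hooks set up above.
 */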
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}