/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. To support this, we first have to translate a
 * TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */

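/*
 * For orientation, a minimal sketch of the plugin side that drives this
 * machinery (illustrative only: vcpu_mem_cb and tb_trans_cb are
 * hypothetical names, but the registration calls are the public
 * qemu-plugin.h API):
 *
 *     static void vcpu_mem_cb(unsigned int cpu_index,
 *                             qemu_plugin_meminfo_t info,
 *                             uint64_t vaddr, void *udata) { ... }
 *
 *     static void tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         size_t i, n = qemu_plugin_tb_n_insns(tb);
 *         for (i = 0; i < n; i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *             qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
 *                                              QEMU_PLUGIN_CB_NO_REGS,
 *                                              QEMU_PLUGIN_MEM_RW, NULL);
 *         }
 *     }
 *
 * Each such request is recorded as a qemu_plugin_dyn_cb, which
 * plugin_gen_inject() below expands at the matching marker opcode.
 */
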
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they
     * can be freed later on, and (2) point CPUState.plugin_mem_cbs to the
     * descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    memcpy(arr->data, insn->mem_cbs->data,
           len * sizeof(struct qemu_plugin_dyn_cb));
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

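/*
 * Layout note for the store above: tcg_env points at ArchCPU.env, and
 * ArchCPU begins with its CPUState parent, so CPUState fields sit at
 * negative offsets from env. Roughly (a sketch of the usual layout):
 *
 *     struct ArchCPU {
 *         CPUState parent_obj;    (plugin_mem_cbs et al. live in here)
 *         ...
 *         CPUArchState env;       (tcg_env points here)
 *     };
 *
 * hence the "offsetof(CPUState, ...) - offsetof(ArchCPU, env)" expression,
 * which the functions below reuse in the equivalent
 * "-offsetof(ArchCPU, env) + offsetof(CPUState, ...)" form.
 */
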
static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
{
    GArray *arr = cb->inline_insn.entry.score->data;
    size_t offset = cb->inline_insn.entry.offset;
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);

    tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
    tcg_gen_ld_i64(val, ptr, offset);
    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
    tcg_gen_st_i64(val, ptr, offset);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

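/*
 * In C terms, the ops emitted by gen_inline_cb() above amount to
 * (a sketch; "arr", "offset" and "imm" come from the plugin's
 * inline-op registration):
 *
 *     char *base = arr->data + cpu_index * g_array_get_element_size(arr);
 *     *(uint64_t *)(base + offset) += imm;
 *
 * i.e. a per-vCPU counter update with no helper call; each vCPU only
 * touches its own scoreboard slot, so no locking is needed.
 */
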
static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call4(cb->regular.f.vcpu_mem, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(cb);
        break;
    case PLUGIN_CB_INLINE:
        gen_inline_cb(cb);
        break;
    default:
        g_assert_not_reached();
    }
}

static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    if (cb->rw & rw) {
        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            gen_mem_cb(cb, meminfo, addr);
            break;
        default:
            inject_cb(cb);
            break;
        }
    }
}

static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(plugin_tb->vaddr))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new ones.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

        /* reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
        g_byte_array_set_size(insn->data, 0);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        insn->data = g_byte_array_sized_new(4);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        insn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        insn->haddr = ptb->haddr1 + pc - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
        }
        insn->haddr = ptb->haddr2 + pc - ptb->vaddr2;
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

void plugin_gen_insn_end(void)
{
    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}

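/*
 * For reference, the expected call sequence from the generic translator
 * loop (a simplified sketch of translator_loop() in
 * accel/tcg/translator.c):
 *
 *     plugin_enabled = plugin_gen_tb_start(cpu, db, mem_only);
 *     do {
 *         if (plugin_enabled) {
 *             plugin_gen_insn_start(cpu, db);
 *         }
 *         ops->translate_insn(db, cpu);
 *         if (plugin_enabled) {
 *             plugin_gen_insn_end();
 *         }
 *     } while (TB not finished);
 *     if (plugin_enabled) {
 *         plugin_gen_tb_end(cpu, db->num_insns);
 *     }
 *
 * Targets that finish the TB emit plugin_gen_disable_mem_helpers()
 * before exit_tb/goto_tb/goto_ptr; afterwards plugin_gen_inject() has
 * expanded or removed every marker opcode.
 */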