/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */
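/*
 * A rough sketch of what the op stream looks like around one guest
 * instruction before injection (schematic only, not an exact
 * tcg_dump_ops() rendering):
 *
 *     insn_start <pc>
 *     plugin_cb  PLUGIN_GEN_FROM_INSN      <- marker
 *     ... ops for the guest instruction ...
 *     plugin_cb  PLUGIN_GEN_AFTER_INSN     <- marker
 *
 * plugin_gen_inject() walks the stream once: each marker is either
 * replaced with calls/inline ops for the callbacks that plugins
 * requested at that point, or simply removed when nothing was requested.
 */
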
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of the copy so that it
     * can be freed later on, and (2) point CPUState.neg.plugin_mem_cbs to
     * the descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    memcpy(arr->data, insn->mem_cbs->data,
           len * sizeof(struct qemu_plugin_dyn_cb));
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    GArray *arr = entry.score->data;
    char *base_ptr = arr->data + entry.offset;
    size_t entry_size = g_array_get_element_size(arr);

    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);
    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);

    return ptr;
}
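/*
 * The two inline callbacks below operate on a qemu_plugin_u64, i.e. one
 * 64-bit slot per vCPU inside a plugin scoreboard. The address emitted by
 * gen_plugin_u64_ptr() above is equivalent to the following host-side C
 * (illustrative sketch only; "elem_size" stands for the GArray element
 * size returned by g_array_get_element_size()):
 *
 *     uint64_t *slot = (uint64_t *)(arr->data + entry.offset
 *                                   + cpu_index * elem_size);
 *
 * so an "inline add" amounts to "*slot += imm" and an "inline store" to
 * "*slot = imm", executed straight from generated code with no helper call.
 */
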
static void gen_inline_add_u64_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->inline_insn.entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

static void gen_inline_store_u64_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->inline_insn.entry);
    TCGv_i64 val = tcg_constant_i64(cb->inline_insn.imm);

    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_ptr(ptr);
}

static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call4(cb->regular.f.vcpu_mem, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(cb);
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
        gen_inline_add_u64_cb(cb);
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        gen_inline_store_u64_cb(cb);
        break;
    default:
        g_assert_not_reached();
    }
}

static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    if (cb->rw & rw) {
        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            gen_mem_cb(cb, meminfo, addr);
            break;
        default:
            inject_cb(cb);
            break;
        }
    }
}

static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new ones.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}
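/*
 * The entry points below are driven by the translator loop; roughly
 * (simplified sketch, gating flags and exception paths omitted):
 *
 *     if (plugin_gen_tb_start(cpu, db)) {
 *         while (more instructions to translate) {
 *             plugin_gen_insn_start(cpu, db);
 *             ... translate one guest instruction ...
 *             plugin_gen_insn_end();
 *         }
 *         plugin_gen_tb_end(cpu, db->num_insns);
 *     }
 *
 * plugin_gen_tb_end() then collects the plugins' instrumentation requests
 * (qemu_plugin_tb_trans_cb) and calls plugin_gen_inject().
 */
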
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}
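/*
 * For context: the requests collected by qemu_plugin_tb_trans_cb() above
 * originate from each plugin's registered tb_trans callback. A minimal
 * plugin-side sketch (not part of this file; insn_exec and mem_access are
 * hypothetical plugin callbacks) might look like:
 *
 *     static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *         for (size_t i = 0; i < n; i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *             qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
 *                                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                                    NULL);
 *             qemu_plugin_register_vcpu_mem_cb(insn, mem_access,
 *                                              QEMU_PLUGIN_CB_NO_REGS,
 *                                              QEMU_PLUGIN_MEM_RW, NULL);
 *         }
 *     }
 *
 * Those registrations become the per-insn insn_cbs/mem_cbs arrays that
 * plugin_gen_inject() expands at the marker opcodes.
 */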