| /openbmc/qemu/hw/timer/ |
| arm_mptimer.c |
    56  static inline void timerblock_update_irq(TimerBlock *tb)  in timerblock_update_irq() argument
    58  qemu_set_irq(tb->irq, tb->status && (tb->control & 4));  in timerblock_update_irq()
    91  TimerBlock *tb = (TimerBlock *)opaque;  in timerblock_tick() local
    95  if ((tb->control & 2) && (tb->control & 0xff00) == 0 &&  in timerblock_tick()
    96  ptimer_get_limit(tb->timer) == 0) {  in timerblock_tick()
    97  ptimer_stop(tb->timer);  in timerblock_tick()
    99  tb->status = 1;  in timerblock_tick()
   100  timerblock_update_irq(tb);  in timerblock_tick()
   106  TimerBlock *tb = (TimerBlock *)opaque;  in timerblock_read() local
   109  return ptimer_get_limit(tb->timer);  in timerblock_read()
        [all …]
|
| /openbmc/qemu/accel/tcg/ |
| translate-all.c |
   100  static int encode_search(TranslationBlock *tb, uint8_t *block)  in encode_search() argument
   108  for (i = 0, n = tb->icount; i < n; ++i) {  in encode_search()
   113  prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);  in encode_search()
   136  static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,  in cpu_unwind_data_from_tb() argument
   139  uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;  in cpu_unwind_data_from_tb()
   140  const uint8_t *p = tb->tc.ptr + tb->tc.size;  in cpu_unwind_data_from_tb()
   141  int i, j, num_insns = tb->icount;  in cpu_unwind_data_from_tb()
   150  if (!(tb_cflags(tb) & CF_PCREL)) {  in cpu_unwind_data_from_tb()
   151  data[0] = tb->pc;  in cpu_unwind_data_from_tb()
   174  void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,  in cpu_restore_state_from_tb() argument
        [all …]
|
| tb-maint.c |
    43  #define TB_FOR_EACH_TAGGED(head, tb, n, field) \  argument
    44  for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
    45  tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
    46  tb = (TranslationBlock *)((uintptr_t)tb & ~1))
    48  #define TB_FOR_EACH_JMP(head_tb, tb, n) \  argument
    49  TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
    80  static inline void tb_lock_pages(const TranslationBlock *tb) { }  in tb_lock_pages() argument
    96  static void tb_record(TranslationBlock *tb)  in tb_record() argument
   102  tb->itree.last = tb->itree.start + tb->size - 1;  in tb_record()
   105  addr = tb_page_addr0(tb);  in tb_record()
        [all …]
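
The TB_FOR_EACH_TAGGED hit above walks a list of translation blocks whose links carry a tag in bit 0: the low bit of each stored pointer selects which of the next node's two link fields continues the chain. A standalone sketch of that decoding (the node type, field names and IDs below are invented for illustration, not QEMU's):

    /*
     * Illustration of the bit-0 tagging decoded by TB_FOR_EACH_TAGGED
     * (tb-maint.c:43 above): the low bit of each stored link says which
     * of the next node's two link fields to follow.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct node {
        uintptr_t link[2];   /* tagged pointers: bit 0 = link index at the next node */
        int id;
    };

    static void walk(uintptr_t head)
    {
        unsigned n = head & 1;
        struct node *p = (struct node *)(head & ~(uintptr_t)1);

        while (p) {
            printf("node %d reached, following link[%u]\n", p->id, n);
            uintptr_t next = p->link[n];
            n = next & 1;
            p = (struct node *)(next & ~(uintptr_t)1);
        }
    }

    int main(void)
    {
        struct node c = { { 0, 0 }, 3 };                   /* end of the chain */
        struct node b = { { 0, (uintptr_t)&c }, 2 };       /* tag 0: at c, follow link[0] */
        struct node a = { { (uintptr_t)&b | 1, 0 }, 1 };   /* tag 1: at b, follow link[1] */

        walk((uintptr_t)&a);                               /* head tag 0: at a, follow link[0] */
        return 0;
    }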
|
| cpu-exec.c |
   159  const TranslationBlock *tb = p;  in tb_lookup_cmp() local
   162  if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&  in tb_lookup_cmp()
   163  tb_page_addr0(tb) == desc->page_addr0 &&  in tb_lookup_cmp()
   164  tb->cs_base == desc->s.cs_base &&  in tb_lookup_cmp()
   165  tb->flags == desc->s.flags &&  in tb_lookup_cmp()
   166  tb_cflags(tb) == desc->s.cflags) {  in tb_lookup_cmp()
   168  tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);  in tb_lookup_cmp()
   228  TranslationBlock *tb;  in tb_lookup() local
   238  tb = qatomic_read(&jc->array[hash].tb);  in tb_lookup()
   239  if (likely(tb &&  in tb_lookup()
        [all …]
|
| trace-events |
     5  exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
     6  exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
     7  exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x"
    14  translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
|
| translator.c |
    88  static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,  in gen_tb_end() argument
   102  tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);  in gen_tb_end()
   114  if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {  in translator_use_goto_tb()
   122  void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,  in translator_loop() argument
   126  uint32_t cflags = tb_cflags(tb);  in translator_loop()
   132  db->tb = tb;  in translator_loop()
   206  gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);  in translator_loop()
   224  tb->size = db->pc_next - db->pc_first;  in translator_loop()
   225  tb->icount = db->num_insns;  in translator_loop()
   251  TranslationBlock *tb = db->tb;  in translator_ld() local
        [all …]
|
| internal-common.h |
    53  void tb_reset_jump(TranslationBlock *tb, int n);
    54  TranslationBlock *tb_link_page(TranslationBlock *tb);
    55  void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
   139  void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
   140  void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
|
| tcg-stats.c |
   112  const TranslationBlock *tb = value;  in tb_tree_stats_iter() local
   116  tst->host_size += tb->tc.size;  in tb_tree_stats_iter()
   117  tst->target_size += tb->size;  in tb_tree_stats_iter()
   118  if (tb->size > tst->max_target_size) {  in tb_tree_stats_iter()
   119  tst->max_target_size = tb->size;  in tb_tree_stats_iter()
   122  if (tb->page_addr[1] != -1) {  in tb_tree_stats_iter()
   126  if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {  in tb_tree_stats_iter()
   128  if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {  in tb_tree_stats_iter()
|
| /openbmc/qemu/include/exec/ |
| translation-block.h |
   153  static inline uint32_t tb_cflags(const TranslationBlock *tb)  in tb_cflags() argument
   155  return qatomic_read(&tb->cflags);  in tb_cflags()
   161  static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)  in tb_page_addr0() argument
   164  return tb->itree.start;  in tb_page_addr0()
   166  return tb->page_addr[0];  in tb_page_addr0()
   170  static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)  in tb_page_addr1() argument
   173  tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;  in tb_page_addr1()
   174  return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;  in tb_page_addr1()
   176  return tb->page_addr[1];  in tb_page_addr1()
   180  static inline void tb_set_page_addr0(TranslationBlock *tb,  in tb_set_page_addr0() argument
        [all …]
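
The tb_page_addr1() match above encodes the "second page" of a translation block: when the block's last byte still falls on the page where it starts, there is no second page and -1 is returned; otherwise the second page is the page holding the last byte. A small self-contained check of that rule (4 KiB pages and the sample addresses are assumptions for the example):

    /*
     * Self-contained check of the tb_page_addr1() rule seen above.
     * 4 KiB target pages are assumed for the sketch.
     */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_MASK (~(uint64_t)0xfff)

    static uint64_t second_page(uint64_t start, uint64_t last)
    {
        uint64_t next = last & PAGE_MASK;
        return next == (start & PAGE_MASK) ? (uint64_t)-1 : next;
    }

    int main(void)
    {
        assert(second_page(0x1000, 0x1ffc) == (uint64_t)-1);  /* fits in one page */
        assert(second_page(0x1ff8, 0x2004) == 0x2000);        /* spills onto the next page */
        return 0;
    }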
|
| /openbmc/qemu/backends/tpm/ |
| tpm_passthrough.c |
    60  static void tpm_passthrough_cancel_cmd(TPMBackend *tb);
   127  static void tpm_passthrough_handle_request(TPMBackend *tb, TPMBackendCmd *cmd,  in tpm_passthrough_handle_request() argument
   130  TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);  in tpm_passthrough_handle_request()
   139  static void tpm_passthrough_reset(TPMBackend *tb)  in tpm_passthrough_reset() argument
   143  tpm_passthrough_cancel_cmd(tb);  in tpm_passthrough_reset()
   146  static bool tpm_passthrough_get_tpm_established_flag(TPMBackend *tb)  in tpm_passthrough_get_tpm_established_flag() argument
   151  static int tpm_passthrough_reset_tpm_established_flag(TPMBackend *tb,  in tpm_passthrough_reset_tpm_established_flag() argument
   158  static void tpm_passthrough_cancel_cmd(TPMBackend *tb)  in tpm_passthrough_cancel_cmd() argument
   160  TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);  in tpm_passthrough_cancel_cmd()
   184  static TPMVersion tpm_passthrough_get_tpm_version(TPMBackend *tb)  in tpm_passthrough_get_tpm_version() argument
        [all …]
|
| tpm_emulator.c |
   245  static void tpm_emulator_handle_request(TPMBackend *tb, TPMBackendCmd *cmd,  in tpm_emulator_handle_request() argument
   248  TPMEmulator *tpm_emu = TPM_EMULATOR(tb);  in tpm_emulator_handle_request()
   311  static int tpm_emulator_stop_tpm(TPMBackend *tb)  in tpm_emulator_stop_tpm() argument
   313  TPMEmulator *tpm_emu = TPM_EMULATOR(tb);  in tpm_emulator_stop_tpm()
   363  static int tpm_emulator_set_buffer_size(TPMBackend *tb,  in tpm_emulator_set_buffer_size() argument
   367  TPMEmulator *tpm_emu = TPM_EMULATOR(tb);  in tpm_emulator_set_buffer_size()
   370  if (tpm_emulator_stop_tpm(tb) < 0) {  in tpm_emulator_set_buffer_size()
   404  static int tpm_emulator_startup_tpm_resume(TPMBackend *tb, size_t buffersize,  in tpm_emulator_startup_tpm_resume() argument
   407  TPMEmulator *tpm_emu = TPM_EMULATOR(tb);  in tpm_emulator_startup_tpm_resume()
   416  tpm_emulator_set_buffer_size(tb, buffersize, NULL) < 0) {  in tpm_emulator_startup_tpm_resume()
        [all …]
|
| /openbmc/qemu/hw/ppc/ |
| ppc.c |
   505  static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)  in tb_to_ns_round_up() argument
   507  return muldiv64_round_up(tb, NANOSECONDS_PER_SECOND, freq);  in tb_to_ns_round_up()
   519  uint64_t tb;  in cpu_ppc_load_tbl() local
   525  tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),  in cpu_ppc_load_tbl()
   527  trace_ppc_tb_load(tb);  in cpu_ppc_load_tbl()
   529  return tb;  in cpu_ppc_load_tbl()
   535  uint64_t tb;  in _cpu_ppc_load_tbu() local
   537  tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),  in _cpu_ppc_load_tbu()
   539  trace_ppc_tb_load(tb);  in _cpu_ppc_load_tbu()
   541  return tb >> 32;  in _cpu_ppc_load_tbu()
        [all …]
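
tb_to_ns_round_up() in the ppc.c matches converts time-base ticks to nanoseconds through QEMU's muldiv64_round_up() helper, i.e. ceil(tb * 10^9 / freq) with a wide intermediate product. A rough standalone equivalent, assuming the compiler provides unsigned __int128:

    /*
     * Standalone equivalent of the conversion above:
     * ns = ceil(tb * NANOSECONDS_PER_SECOND / freq).
     * QEMU uses muldiv64_round_up(); this sketch uses __int128 instead.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NANOSECONDS_PER_SECOND 1000000000ULL

    static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)
    {
        unsigned __int128 prod = (unsigned __int128)tb * NANOSECONDS_PER_SECOND;
        return (int64_t)((prod + freq - 1) / freq);
    }

    int main(void)
    {
        /* 512 MHz time base: one tick is ~1.95 ns, rounded up to 2 */
        printf("%lld\n", (long long)tb_to_ns_round_up(512000000, 1));
        return 0;
    }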
|
| /openbmc/qemu/target/i386/tcg/ |
| tcg-cpu.c |
    71  const TranslationBlock *tb)  in x86_cpu_synchronize_from_tb() argument
    74  if (!(tb_cflags(tb) & CF_PCREL)) {  in x86_cpu_synchronize_from_tb()
    77  if (tb->flags & HF_CS64_MASK) {  in x86_cpu_synchronize_from_tb()
    78  env->eip = tb->pc;  in x86_cpu_synchronize_from_tb()
    80  env->eip = (uint32_t)(tb->pc - tb->cs_base);  in x86_cpu_synchronize_from_tb()
    86  const TranslationBlock *tb,  in x86_restore_state_to_opc() argument
    94  if (tb_cflags(tb) & CF_PCREL) {  in x86_restore_state_to_opc()
   101  uint64_t pc = env->eip + tb->cs_base;  in x86_restore_state_to_opc()
   106  if (tb->flags & HF_CS64_MASK) {  in x86_restore_state_to_opc()
   109  env->eip = (uint32_t)(new_pc - tb->cs_base);  in x86_restore_state_to_opc()
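
The x86 matches recover the architectural eip from tb->pc, which is a linear address (eip plus the CS segment base): with a 64-bit code segment cs_base is zero and pc is used directly, otherwise eip is pc - cs_base truncated to 32 bits. A tiny sketch of that mapping, with made-up addresses:

    /*
     * Sketch of the pc <-> eip mapping used in x86_cpu_synchronize_from_tb()
     * above.  The sample addresses are invented for the example.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t eip_from_pc(uint64_t pc, uint64_t cs_base, int cs64)
    {
        return cs64 ? pc : (uint32_t)(pc - cs_base);
    }

    int main(void)
    {
        assert(eip_from_pc(0x0010f000, 0x00100000, 0) == 0xf000);                  /* 32-bit CS */
        assert(eip_from_pc(0xffffffff81000000ull, 0, 1) == 0xffffffff81000000ull); /* 64-bit CS */
        return 0;
    }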
|
| /openbmc/qemu/tests/tcg/plugins/ |
| inline.c |
   131  const uint64_t tb = qemu_plugin_u64_get(count_tb, i);  in plugin_exit() local
   154  tb, tb_inline,  in plugin_exit()
   160  g_assert(tb == tb_inline);  in plugin_exit()
   163  g_assert(tb_cond_trigger == tb / cond_trigger_limit);  in plugin_exit()
   164  g_assert(tb_cond_left == tb % cond_trigger_limit);  in plugin_exit()
   226  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in vcpu_tb_trans() argument
   228  void *tb_store = tb;  in vcpu_tb_trans()
   230  tb, QEMU_PLUGIN_INLINE_STORE_U64, data_tb, (uintptr_t) tb_store);  in vcpu_tb_trans()
   232  tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, tb_store);  in vcpu_tb_trans()
   234  tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);  in vcpu_tb_trans()
        [all …]
|
| reset.c |
    30  static void tb_trans_after_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in tb_trans_after_reset() argument
    33  qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_after_reset,  in tb_trans_after_reset()
    51  static void tb_trans_before_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in tb_trans_before_reset() argument
    54  qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_before_reset,  in tb_trans_before_reset()
|
| bb.c |
    80  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in vcpu_tb_trans() argument
    82  size_t n_insns = qemu_plugin_tb_n_insns(tb);  in vcpu_tb_trans()
    86  tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count, 1);  in vcpu_tb_trans()
    88  tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, n_insns);  in vcpu_tb_trans()
    90  qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,  in vcpu_tb_trans()
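
The plugin fragments here (bb.c above, and inline.c/reset.c earlier) all hang their work off the per-TB translation hook. A stripped-down plugin in the same shape, using a plain execution callback instead of bb.c's inline ops and per-vCPU scoreboards, so the single global counter pair is only meaningful for a single-threaded guest:

    /*
     * Minimal sketch of the pattern shown in bb.c: a per-TB translation
     * hook registers an execution callback that counts basic blocks and
     * instructions, and the totals are printed at exit.
     */
    #include <inttypes.h>
    #include <stdio.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static uint64_t bb_count;
    static uint64_t insn_count;

    static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
    {
        bb_count++;
        insn_count += (uintptr_t)udata;   /* insn count stashed at translation time */
    }

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n_insns = qemu_plugin_tb_n_insns(tb);
        qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             (void *)(uintptr_t)n_insns);
    }

    static void plugin_exit(qemu_plugin_id_t id, void *p)
    {
        char buf[128];
        snprintf(buf, sizeof(buf), "bb: %" PRIu64 ", insns: %" PRIu64 "\n",
                 bb_count, insn_count);
        qemu_plugin_outs(buf);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }

Built as a shared object, it can be loaded with QEMU's -plugin option; the totals are emitted through qemu_plugin_outs() at exit.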
|
| /openbmc/openbmc/meta-openembedded/meta-networking/recipes-daemons/openhpi/files/ |
| openhpi-fix-alignment-issue.patch |
    12  if (tb->DataLength == 0) {
    16  + /* tb->Data is not aligned -- copy to temp */
    17  + memcpy(&the_time, tb->Data, sizeof(the_time));
    19  - *(time_t *)tb->Data);
|
| /openbmc/openbmc/poky/meta/recipes-core/busybox/busybox/ |
| busybox-1.36.1-no-cbq.patch |
    33  struct rtattr *tb[TCA_CBQ_MAX+1];
    44  prio_print_opt(tb[TCA_OPTIONS]);
    46  - cbq_print_opt(tb[TCA_OPTIONS]);
    47  + /* cbq_print_opt(tb[TCA_OPTIONS]); */
    53  /* nothing. */ /*prio_print_opt(tb[TCA_OPTIONS]);*/
    56  - cbq_print_opt(tb[TCA_OPTIONS]);
    57  + /* cbq_print_opt(tb[TCA_OPTIONS]); */
|
| /openbmc/qemu/block/ |
| qcow2-bitmap.c |
   225  bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb,  in bitmap_table_load() argument
   233  assert(tb->size != 0);  in bitmap_table_load()
   234  table = g_try_new(uint64_t, tb->size);  in bitmap_table_load()
   239  assert(tb->size <= BME_MAX_TABLE_SIZE);  in bitmap_table_load()
   240  ret = bdrv_pread(bs->file, tb->offset, tb->size * BME_TABLE_ENTRY_SIZE,  in bitmap_table_load()
   246  for (i = 0; i < tb->size; ++i) {  in bitmap_table_load()
   264  free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)  in free_bitmap_clusters() argument
   269  ret = bitmap_table_load(bs, tb, &bitmap_table);  in free_bitmap_clusters()
   274  clear_bitmap_table(bs, bitmap_table, tb->size);  in free_bitmap_clusters()
   275  qcow2_free_clusters(bs, tb->offset, tb->size * BME_TABLE_ENTRY_SIZE,  in free_bitmap_clusters()
        [all …]
|
| /openbmc/qemu/contrib/plugins/ |
| bbv.c |
   100  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in vcpu_tb_trans() argument
   102  uint64_t n_insns = qemu_plugin_tb_n_insns(tb);  in vcpu_tb_trans()
   103  uint64_t vaddr = qemu_plugin_tb_vaddr(tb);  in vcpu_tb_trans()
   118  tb, QEMU_PLUGIN_INLINE_ADD_U64, count_u64(), n_insns);  in vcpu_tb_trans()
   121  tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count_u64(bb), n_insns);  in vcpu_tb_trans()
   124  tb, vcpu_interval_exec, QEMU_PLUGIN_CB_NO_REGS,  in vcpu_tb_trans()
|
| drcov.c |
   122  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in vcpu_tb_trans() argument
   124  uint64_t pc = qemu_plugin_tb_vaddr(tb);  in vcpu_tb_trans()
   125  size_t n = qemu_plugin_tb_n_insns(tb);  in vcpu_tb_trans()
   131  bb->size += qemu_plugin_insn_size(qemu_plugin_tb_get_insn(tb, i));  in vcpu_tb_trans()
   140  qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,  in vcpu_tb_trans()
|
| cflow.c |
   300  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)  in vcpu_tb_trans() argument
   302  uint64_t pc = qemu_plugin_tb_vaddr(tb);  in vcpu_tb_trans()
   303  size_t insns = qemu_plugin_tb_n_insns(tb);  in vcpu_tb_trans()
   304  struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);  in vcpu_tb_trans()
   305  struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);  in vcpu_tb_trans()
   313  tb, QEMU_PLUGIN_INLINE_STORE_U64, tb_pc, pc);  in vcpu_tb_trans()
   315  tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,  in vcpu_tb_trans()
   332  for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {  in vcpu_tb_trans()
   333  struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);  in vcpu_tb_trans()
|
| /openbmc/qemu/tests/tcg/aarch64/ |
| bti-2.c |
    87  void *tb, *te;  in main() local
   111  asm("adr %0, test_begin; adr %1, test_end" : "=r"(tb), "=r"(te));  in main()
   113  memcpy(p, tb, te - tb);  in main()
|
| /openbmc/qemu/plugins/ |
| api.c |
    87  void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,  in qemu_plugin_register_vcpu_tb_exec_cb() argument
    93  plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);  in qemu_plugin_register_vcpu_tb_exec_cb()
    97  void qemu_plugin_register_vcpu_tb_exec_cond_cb(struct qemu_plugin_tb *tb,  in qemu_plugin_register_vcpu_tb_exec_cond_cb() argument
   109  qemu_plugin_register_vcpu_tb_exec_cb(tb, cb, flags, udata);  in qemu_plugin_register_vcpu_tb_exec_cond_cb()
   112  plugin_register_dyn_cond_cb__udata(&tb->cbs, cb, flags,  in qemu_plugin_register_vcpu_tb_exec_cond_cb()
   117  struct qemu_plugin_tb *tb,  in qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() argument
   123  plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);  in qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu()
   227  size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb)  in qemu_plugin_tb_n_insns() argument
   229  return tb->n;  in qemu_plugin_tb_n_insns()
   232  uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)  in qemu_plugin_tb_vaddr() argument
        [all …]
|
| /openbmc/qemu/target/mips/tcg/ |
| tcg-internal.h |
    19  void mips_translate_code(CPUState *cs, TranslationBlock *tb,
    22  void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
    27  const TranslationBlock *tb,
    55  bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb);
|