/*
 * The per-CPU TranslationBlock jump cache.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
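
/*
 * Example (illustrative sketch, not part of the original header): the
 * cache is direct-mapped, so lookups index 'array' below with the guest
 * pc folded down to TB_JMP_CACHE_BITS bits.  QEMU's real hash is
 * tb_jmp_cache_hash_func() in accel/tcg/tb-hash.h; the XOR-fold here is
 * a simplified stand-in, not the actual function.
 */
static inline uint32_t example_tb_jmp_cache_hash(target_ulong pc)
{
    /* XOR-fold so that addresses sharing low bits still spread out. */
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}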

/*
 * Accessed in parallel; all accesses to 'tb' must be atomic.
 * For CF_PCREL, accesses to 'pc' must be protected by a
 * load_acquire/store_release to 'tb'.
 */
struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        TranslationBlock *tb;
        target_ulong pc;
    } array[TB_JMP_CACHE_SIZE];
};
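
/*
 * The CF_PCREL contract above pairs tb_jmp_cache_set()'s store_release
 * of 'tb' with tb_jmp_cache_get_tb()'s load_acquire: a reader that
 * observes the new 'tb' is guaranteed to also observe the 'pc' stored
 * just before it.
 */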

static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
{
    if (cflags & CF_PCREL) {
        /* Use acquire to ensure current load of pc from jc. */
        return qatomic_load_acquire(&jc->array[hash].tb);
    } else {
        /* Use rcu_read to ensure current load of pc from *tb. */
        return qatomic_rcu_read(&jc->array[hash].tb);
    }
}

static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return jc->array[hash].pc;
    } else {
        return tb_pc(tb);
    }
}

static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                 TranslationBlock *tb, target_ulong pc)
{
    if (tb_cflags(tb) & CF_PCREL) {
        jc->array[hash].pc = pc;
        /* Use store_release on tb to ensure pc is written first. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    } else {
        /* Use the pc value already stored in tb->pc. */
        qatomic_set(&jc->array[hash].tb, tb);
    }
}
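
/*
 * Example (illustrative sketch, not part of the original header): one
 * way a fast-path lookup could combine the helpers above.  QEMU's real
 * consumer is tb_lookup() in accel/tcg/cpu-exec.c, which also matches
 * cs_base, flags and cflags; example_tb_slow_lookup() is a hypothetical
 * stand-in for the real hash-table fallback (tb_htable_lookup()).
 */
TranslationBlock *example_tb_slow_lookup(target_ulong pc, uint32_t cflags);

static inline TranslationBlock *
example_tb_lookup(CPUJumpCache *jc, target_ulong pc, uint32_t cflags)
{
    uint32_t hash = example_tb_jmp_cache_hash(pc);
    TranslationBlock *tb = tb_jmp_cache_get_tb(jc, cflags, hash);

    /* Hit only if the cached entry translates exactly this pc. */
    if (tb != NULL && tb_jmp_cache_get_pc(jc, hash, tb) == pc) {
        return tb;
    }

    /* Miss: fall back to the slow lookup and refill this cache slot. */
    tb = example_tb_slow_lookup(pc, cflags);
    if (tb != NULL) {
        tb_jmp_cache_set(jc, hash, tb, pc);
    }
    return tb;
}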

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */