xref: /openbmc/qemu/accel/tcg/tb-jmp-cache.h (revision 8466405e)
/*
 * The per-CPU TranslationBlock jump cache.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
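
/*
 * The cache is direct-mapped: 1 << TB_JMP_CACHE_BITS = 4096 entries, one
 * per bucket.  The hash used to index it is chosen by the callers (in the
 * TCG fast path this is a hash of the guest pc); nothing in this header
 * depends on a particular hash function.
 */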

/*
 * Accessed in parallel; all accesses to 'tb' must be atomic.
 * For TARGET_TB_PCREL, accesses to 'pc' must be protected by a
 * load_acquire/store_release pairing on 'tb'.  The 'pc' field exists
 * only in that configuration: a PC-relative TranslationBlock does not
 * record the guest pc itself (see tb_pc()), so the cache has to
 * remember it alongside the tb pointer.
 */
struct CPUJumpCache {
    struct {
        TranslationBlock *tb;
#if TARGET_TB_PCREL
        target_ulong pc;
#endif
    } array[TB_JMP_CACHE_SIZE];
};
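
/*
 * Entry protocol (descriptive, matching the accessors below): a writer
 * fills in 'pc' first and then publishes 'tb' with a release store; a
 * reader acquire-loads 'tb' and only then reads 'pc', so a non-NULL 'tb'
 * guarantees the 'pc' it sees belongs to that tb.  A NULL 'tb' simply
 * means the slot holds no cached translation.
 */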

static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
    /* Use acquire to ensure current load of pc from jc. */
    return qatomic_load_acquire(&jc->array[hash].tb);
#else
    /* Use rcu_read to ensure current load of pc from *tb. */
    return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}

static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return jc->array[hash].pc;
#else
    return tb_pc(tb);
#endif
}
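
/*
 * Illustrative fast-path lookup, as a caller might write it.  This is a
 * sketch only: the hash computation and the extra validity checks (flags,
 * cs_base, ...) are assumptions about the caller, not part of this header.
 *
 *     TranslationBlock *tb = tb_jmp_cache_get_tb(jc, hash);
 *
 *     if (likely(tb != NULL &&
 *                tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
 *                ... remaining per-TB checks ...)) {
 *         return tb;                  // cache hit
 *     }
 *     // miss: fall back to the slow hash-table lookup
 */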

static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                 TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
    jc->array[hash].pc = pc;
    /* Use store_release on tb to ensure pc is written first. */
    qatomic_store_release(&jc->array[hash].tb, tb);
#else
    /* Use the pc value already stored in tb->pc. */
    qatomic_set(&jc->array[hash].tb, tb);
#endif
}
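
/*
 * Illustrative refill after a cache miss.  'lookup_tb_slow()' is a
 * hypothetical stand-in for whatever slow-path lookup the caller uses;
 * the point is only that the same 'hash' and 'pc' as on the reader side
 * must be passed to tb_jmp_cache_set().
 *
 *     tb = lookup_tb_slow(cpu, pc, cs_base, flags);
 *     if (tb != NULL) {
 *         tb_jmp_cache_set(jc, hash, tb, pc);
 *     }
 */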

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */