/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARC_JUMP_LABEL_H
#define _ASM_ARC_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_LABEL_NOP_SIZE 4

/*
 * NOTE about '.balign 4':
 *
 * To allow atomic updates of the patched instruction, we must guarantee
 * that the instruction does not cross an L1 cache line boundary.
 *
 * For now we simply align each patchable instruction to 4 bytes with the
 * ".balign 4" directive. When required, the assembler pads the patch site
 * with one 16-bit NOP_S to achieve that alignment.
 * However, 4-byte alignment is stricter than actually required: it is
 * enough that the 32-bit instruction does not cross an L1 cache line /
 * L1 I$ fetch block boundary, which could be achieved with the
 * ".bundle_align_mode" assembler directive. That would save us from
 * emitting useless NOP_S padding in most cases.
 *
 * TODO: switch to the ".bundle_align_mode" directive once it is
 * supported by the ARC toolchain.
 */
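/*
 * Hypothetical layout, for illustration only: if the patch site would
 * otherwise start on a 2-byte boundary, ".balign 4" emits one 16-bit
 * NOP_S first, so the 32-bit patchable instruction starts on a 4-byte
 * boundary and cannot straddle a cache line:
 *
 *   0x1002:  nop_s        ; padding inserted by ".balign 4"
 *   0x1004:  nop          ; 32-bit patch site, within one fetch block
 */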

static __always_inline bool arch_static_branch(struct static_key *key,
					       bool branch)
{
	/*
	 * Default-disabled form: execute a nop and fall through. The
	 * __jump_table entry records the patch site (1b), the jump
	 * target (l_yes) and the key, so the nop can be patched into a
	 * branch later.
	 */
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"		\n"
		 "1:							\n"
		 "nop							\n"
		 ".pushsection __jump_table, \"aw\"			\n"
		 ".word 1b, %l[l_yes], %c0				\n"
		 ".popsection						\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

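/*
 * Usage sketch (generic static-key pattern from <linux/jump_label.h>,
 * not specific to ARC; my_key and do_slow_path() are made-up names):
 *
 *   static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *   if (static_branch_unlikely(&my_key))
 *           do_slow_path();
 *
 * While the key is disabled this compiles down to the single nop above;
 * static_branch_enable(&my_key) patches it into a branch at runtime.
 */
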
static __always_inline bool arch_static_branch_jump(struct static_key *key,
						    bool branch)
{
	/*
	 * Default-enabled form: an unconditional branch to l_yes that
	 * can later be patched into a nop.
	 */
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"		\n"
		 "1:							\n"
		 "b %l[l_yes]						\n"
		 ".pushsection __jump_table, \"aw\"			\n"
		 ".word 1b, %l[l_yes], %c0				\n"
		 ".popsection						\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

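/*
 * The generic static-key code (see <linux/jump_label.h>) selects between
 * the two forms above based on the key's default value and the expected
 * branch direction, so the common path runs straight-line code.
 */
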
typedef u32 jump_label_t;

struct jump_entry {
	jump_label_t code;	/* address of the patch site */
	jump_label_t target;	/* address to branch to when enabled */
	jump_label_t key;	/* address of the controlling static_key */
};

#endif /* __ASSEMBLY__ */
#endif