/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
		  unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
		       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, struct ppc_inst instr);
int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
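
/*
 * Usage sketch (illustrative only; "addr" and "target" are hypothetical
 * values supplied by the caller): build a relative "bl" at addr that
 * calls target. create_branch() fails if the target is out of range for
 * a relative branch.
 *
 *	struct ppc_inst instr;
 *
 *	if (!create_branch(&instr, addr, target, BRANCH_SET_LINK))
 *		patch_instruction(addr, instr);
 *
 * patch_branch(addr, target, BRANCH_SET_LINK) combines the two steps.
 */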

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
	return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
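
/*
 * A patch site is an s32 holding the offset from itself to the instruction
 * to be patched; patch_site_addr() turns that back into the instruction's
 * address. Sketch only ("patch__example" is a hypothetical site symbol
 * emitted elsewhere, e.g. by assembly code):
 *
 *	extern s32 patch__example;
 *
 *	patch_instruction_site(&patch__example, ppc_inst(PPC_RAW_NOP()));
 */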

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
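
/*
 * Sketch of a read-modify-write patch (illustrative; "patch__example2" is
 * a hypothetical patch site): clear the low 16 bits of the instruction
 * (e.g. the immediate of an addi) and substitute a new value.
 *
 *	modify_instruction_site(&patch__example2, 0xffff, new_val & 0xffff);
 */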

int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif
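
/*
 * patch_exception() takes the label of an assembly entry point, not a
 * function pointer, since it needs the address of the handler text itself.
 * Illustrative only (the vector offset and handler label below are
 * assumptions):
 *
 *	patch_exception(0x1d00, my_alternative_handler);
 */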

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}
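
/*
 * Illustrative only ("my_func" and "call_site" are hypothetical): when
 * patching a call from kernel code where r2 (the TOC pointer) is already
 * valid, branch to the local entry point.
 *
 *	unsigned long dest = ppc_function_entry(my_func);
 *
 *	patch_branch(call_site, dest, BRANCH_SET_LINK);
 */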

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is at the function's address */
	return (unsigned long)func;
#else
	/* In all other cases there is no difference from ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}
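
/*
 * Illustrative lookup (the symbol name is only an example): resolve a
 * kernel function to an address suitable for patching a call to it.
 *
 *	unsigned long entry = ppc_kallsyms_lookup_name("kernel_clone");
 *
 * On ABIv1 this finds ".kernel_clone", on ABIv2 it returns the local
 * entry point, and a return value of 0 means the symbol was not found.
 */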

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET         24
#else
#define R2_STACK_OFFSET         40
#endif

#define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
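
/*
 * These encodings are intended for comparison against instruction words
 * read from a call site. Sketch only ("op" is a hypothetical u32 read
 * from the traced function):
 *
 *	if (op == PPC_INST_LD_TOC)
 *		...	the call site reloads r2 (the TOC pointer)
 *			from its stack save slot after the call
 */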
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */