/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
bool is_offset_in_cond_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
		  unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
		       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, struct ppc_inst instr);
int raw_patch_instruction(u32 *addr, struct ppc_inst instr);

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
	return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((u32 *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
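
/*
 * Illustrative usage sketch (not part of the original header): the patch-site
 * helpers above resolve a self-relative s32 offset recorded at build time and
 * patch the instruction it points at. The site symbol and handler below are
 * hypothetical, shown only to illustrate the calling convention:
 *
 *	extern s32 patch__example_site;
 *	extern void example_handler(void);
 *
 *	static int apply_example_patch(void)
 *	{
 *		// Write an unconditional relative branch ("b", flags == 0)
 *		// at the recorded site, targeting the handler's local entry
 *		// point (ppc_function_entry() is defined further below).
 *		return patch_branch_site(&patch__example_site,
 *					 ppc_function_entry((void *)example_handler),
 *					 0);
 *	}
 */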

int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))


static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 *	addis r2,r12,XXXX
	 *	addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 *	lis   r2,XXXX
	 *	addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is the function's address */
	return (unsigned long)func;
#else
	/* In all other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftrace
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET		24
#else
#define R2_STACK_OFFSET		40
#endif

#define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */