#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
unsigned int create_branch(const unsigned int *addr,
			   unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
				unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, unsigned int instr)
{
	return patch_instruction((unsigned int *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, (*addr & ~clr) | set);
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
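
/*
 * Illustrative sketch (an editorial example, not part of the original
 * header): a patch site is an s32 holding the offset from its own address
 * to the instruction to be patched, which is what patch_site_addr()
 * decodes. A hypothetical caller could nop out or redirect the recorded
 * instruction like so:
 *
 *	extern s32 patch__example_site;		(hypothetical site label)
 *
 *	patch_instruction_site(&patch__example_site, PPC_INST_NOP);
 *	patch_branch_site(&patch__example_site, (unsigned long)new_target,
 *			  BRANCH_SET_LINK);
 */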

int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
			      const unsigned int *src);
extern bool is_conditional_branch(unsigned int instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c020000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is the function address itself */
	return (unsigned long)func;
#else
	/* In all other cases there is no difference vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we lookup the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET		24
#else
#define R2_STACK_OFFSET		40
#endif

#define PPC_INST_LD_TOC		(PPC_INST_LD  | ___PPC_RT(__REG_R2) | \
				 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
				 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */
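
/*
 * Illustrative note (an editorial sketch, not from the original header):
 * PPC_INST_LD_TOC and PPC_INST_STD_LR above are intended to match
 * sequences such as
 *
 *	mflr	r0
 *	std	r0,PPC_LR_STKOFF(r1)		<- PPC_INST_STD_LR
 *	bl	<target>
 *	ld	r2,R2_STACK_OFFSET(r1)		<- PPC_INST_LD_TOC
 *
 * where the ld restores the TOC pointer (r2) after a call that may have
 * gone via a trampoline, and R2_STACK_OFFSET must equal STK_GOT for the
 * ABI in use (24 on ELFv2, 40 on ELFv1).
 */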