/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <linux/stop_machine.h>

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

/*
 * Decode the imm field of a b/bl instruction, and return the byte
 * offset as a signed value (so it can be used when computing a new
 * branch target).
 */
static s32 get_branch_offset(u32 insn)
{
	s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);

	/* sign-extend the immediate before turning it into a byte offset */
	return (imm << 6) >> 4;
}
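/*
 * Worked example of the shift trick above (illustrative only, not part
 * of the original file): a branch back by 8 bytes encodes
 * imm26 = 0x3fffffe (-2 instructions in 26-bit two's complement), and
 * the decoder returns the raw field value 0x03fffffe. Shifting left by
 * 6 moves the field's sign bit into bit 31, giving 0xffffff80 (-128);
 * the arithmetic shift right by 4 then yields 0xfffffff8, i.e. -8.
 * The net effect is a sign extension from 26 bits combined with the
 * << 2 scaling from instruction words to bytes.
 */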

static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
{
	u32 insn;

	aarch64_insn_read(altinsnptr, &insn);

	/* Stop the world on instructions we don't support... */
	BUG_ON(aarch64_insn_is_cbz(insn));
	BUG_ON(aarch64_insn_is_cbnz(insn));
	BUG_ON(aarch64_insn_is_bcond(insn));
	/* ... and there is probably more. */

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		enum aarch64_insn_branch_type type;
		unsigned long target;

		if (aarch64_insn_is_b(insn))
			type = AARCH64_INSN_BRANCH_NOLINK;
		else
			type = AARCH64_INSN_BRANCH_LINK;

		target = (unsigned long)altinsnptr + get_branch_offset(insn);
		insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
						   target, type);
	}

	return insn;
}

static int __apply_alternatives(void *alt_region)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	u8 *origptr, *replptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i;

		if (!cpus_have_cap(alt->cpufeature))
			continue;

		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;

		for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
			insn = get_alt_insn(origptr + i, replptr + i);
			aarch64_insn_write(origptr + i, insn);
		}

		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + alt->alt_len));
	}

	return 0;
}

void apply_alternatives_all(void)
{
	struct alt_region region = {
		.begin	= __alt_instructions,
		.end	= __alt_instructions_end,
	};

	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives, &region, NULL);
}

void apply_alternatives(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};

	__apply_alternatives(&region);
}

void free_alternatives_memory(void)
{
	free_reserved_area(__alt_instructions, __alt_instructions_end,
			   0, "alternatives");
}
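
/*
 * Usage sketch (an assumption about the callers, not part of this file):
 * the boot path is expected to patch the whole kernel image once all
 * secondary CPUs are up, while a loadable module would patch only its own
 * .altinstructions section as it is loaded, e.g.
 *
 *	apply_alternatives_all();		// e.g. from smp_cpus_done()
 *	apply_alternatives(alt_sec_start, alt_sec_size);
 *
 * (alt_sec_start/alt_sec_size are placeholder names for the module's
 * alternatives section.) Only the _all variant needs stop_machine(), as
 * it rewrites code that other CPUs may be executing concurrently; a
 * module's text is not yet live when its alternatives are applied.
 */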