/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>

/* The offsets stored in struct alt_instr are relative to the field's own address. */
#define __ALT_PTR(a, f)		((u32 *)((void *)&(a)->f + (a)->f))
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

/*
 * Check whether a branch target needs fixing up: a target in regular
 * kernel text does, while a target inside this alternative's own
 * replacement sequence is already correct.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr;

	if (kernel_text_address(pc))
		return true;

	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return false;

	/*
	 * Branching into *another* alternate sequence is doomed, and
	 * we're not even trying to fix it up.
	 */
	BUG();
}

/*
 * Fetch one instruction from the replacement sequence, rewriting the
 * offset of an immediate branch so that it still reaches the same
 * target once the instruction is copied to its final location.
 */
static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	}

	return insn;
}
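/*
 * For context, a sketch of how an alternative is emitted at a call site.
 * This assumes the three-argument ALTERNATIVE() macro from
 * <asm/alternative.h>; the instructions and the capability bit below are
 * illustrative only, not a quotation of real kernel code:
 *
 *	asm volatile(ALTERNATIVE(
 *		"nop",				// default instruction
 *		"dmb ish",			// used when the cap is set
 *		ARM64_WORKAROUND_845719));	// cpufeature bit to test
 *
 * The default instruction lands in .text, the replacement in
 * .altinstr_replacement, and a struct alt_instr entry recording both
 * offsets is appended to .altinstructions, bounded by
 * __alt_instructions[] / __alt_instructions_end[] above. That is the
 * table __apply_alternatives() below walks.
 */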
static int __apply_alternatives(void *alt_region)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	u32 *origptr, *replptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i, nr_inst;

		/* Skip entries whose CPU feature is not present. */
		if (!cpus_have_cap(alt->cpufeature))
			continue;

		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		replptr = ALT_REPL_PTR(alt);
		nr_inst = alt->alt_len / sizeof(insn);

		/*
		 * Copy the replacement over the original, one instruction
		 * at a time, fixing up relative branches on the way.
		 */
		for (i = 0; i < nr_inst; i++) {
			insn = get_alt_insn(alt, origptr + i, replptr + i);
			*(origptr + i) = cpu_to_le32(insn);
		}

		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + nr_inst));
	}

	return 0;
}

void apply_alternatives_all(void)
{
	struct alt_region region = {
		.begin	= __alt_instructions,
		.end	= __alt_instructions_end,
	};

	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives, &region, NULL);
}

void apply_alternatives(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};

	__apply_alternatives(&region);
}

void free_alternatives_memory(void)
{
	free_reserved_area(__alt_instructions, __alt_instructions_end,
			   0, "alternatives");
}
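/*
 * Call-site sketch (an illustration of the usual bring-up flow, not a
 * verbatim quotation of other kernel files): apply_alternatives_all() is
 * intended to run exactly once, after all CPUs are online and every
 * capability bit has settled, e.g.:
 *
 *	void __init smp_cpus_done(unsigned int max_cpus)
 *	{
 *		...
 *		apply_alternatives_all();
 *	}
 *
 * apply_alternatives(), by contrast, patches only a caller-supplied
 * region and does so on the calling CPU without stop_machine(), so it
 * is only safe while the affected code cannot run elsewhere.
 */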