xref: /openbmc/linux/arch/arm64/kernel/alternative.c (revision e3b9f1e8)
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
#include <linux/stop_machine.h>

#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

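/*
 * The alt_instr offset fields are self-relative: each 32-bit field
 * holds the signed distance from the field's own address to the code
 * it refers to, which keeps .altinstructions position-independent.
 * A worked example with illustrative addresses: if &alt->orig_offset
 * is 0xffff000008f00000 and the field holds -0x100000, then
 * ALT_ORIG_PTR(alt) == 0xffff000008e00000.
 */
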
int alternatives_applied;

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

/*
 * Check whether a branch target needs fixing up: a target in regular
 * kernel text must be re-encoded relative to the patched location,
 * while a target inside this alternative's own replacement sequence
 * is already correct as-is.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr;

	if (kernel_text_address(pc))
		return true;

	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return false;

	/*
	 * Branching into *another* alternate sequence is doomed, and
	 * we're not even trying to fix it up.
	 */
	BUG();
}
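
/*
 * For illustration (addresses hypothetical): with a replacement
 * sequence at 0xffff000009200000 of alt_len 8, a branch inside it
 * targeting 0xffff000009200004 is left untouched, while a "bl" to a
 * helper in regular kernel text, say 0xffff000008123458, must be
 * re-encoded relative to wherever the instruction is finally patched.
 */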

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

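/*
 * For example, align_down(0xffff000008123abc, SZ_4K) yields
 * 0xffff000008123000: SZ_4K - 1 is 0xfff, and clearing those low bits
 * rounds the address down to its 4K page boundary.
 */
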
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K-aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC-relative
		 * literal addresses.
		 */
		BUG();
	}

	return insn;
}
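
/*
 * A worked adrp case, with illustrative addresses: suppose the
 * replacement instruction at altinsnptr == 0xffff000009123004 encodes
 * an offset of 0x5000, i.e. it addresses the page
 * align_down(0xffff000009123004, SZ_4K) + 0x5000 == 0xffff000009128000.
 * Patched in at insnptr == 0xffff000008456008, the rewritten offset is
 * 0xffff000009128000 - align_down(0xffff000008456008, SZ_4K)
 * == 0xcd2000, so the instruction still resolves to the same page.
 */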

static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *replptr, *updptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i, nr_inst;

		if (!cpus_have_cap(alt->cpufeature))
			continue;

		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		replptr = ALT_REPL_PTR(alt);
		updptr = use_linear_alias ? lm_alias(origptr) : origptr;
		nr_inst = alt->alt_len / sizeof(insn);

		for (i = 0; i < nr_inst; i++) {
			insn = get_alt_insn(alt, origptr + i, replptr + i);
			updptr[i] = cpu_to_le32(insn);
		}

		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + nr_inst));
	}
}
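
/*
 * A note on use_linear_alias: by the time the boot-time pass runs, the
 * kernel's .text mapping is read-only, so the new instructions are
 * written through the writable linear-map alias of the same physical
 * pages (lm_alias(origptr)); the icache is still invalidated via the
 * .text address that will actually be executed.
 */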

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(alternatives_applied))
			cpu_relax();
		isb();
	} else {
		BUG_ON(alternatives_applied);
		__apply_alternatives(&region, true);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(alternatives_applied, 1);
	}

	return 0;
}
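
/*
 * The polling protocol above, sketched:
 *
 *	CPU 0				other CPUs
 *	-----				----------
 *	__apply_alternatives()		spin on alternatives_applied == 0
 *	(ordering provided by the	(plain polling, no locking, as
 *	 cache maintenance)		 stop_machine itself may be patched)
 *	alternatives_applied = 1  -->	observe 1, isb(), start executing
 *					the patched instructions
 */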

void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

void apply_alternatives(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};

	__apply_alternatives(&region, false);
}
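
/*
 * A typical caller, sketched on the assumption of the usual module
 * load path: module_finalize() finds a module's .altinstructions
 * section and hands it straight here, e.g.
 *
 *	apply_alternatives((void *)sechdr->sh_addr, sechdr->sh_size);
 *
 * giving modules the same patching as the core image, minus the
 * stop_machine dance, since the module code is not yet executing.
 */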