xref: /openbmc/linux/arch/arm64/kernel/patching.c (revision e4ecbe83)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

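/*
 * Serialises all users of the FIX_TEXT_POKE0 fixmap slot, so that
 * concurrent patchers cannot clobber each other's temporary mapping.
 */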
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

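/*
 * Map @addr so that it can be written. Text in the kernel image (including
 * not-yet-discarded .exit.text) is translated via __pa_symbol(); module
 * text, which is mapped read-only in vmalloc space when
 * CONFIG_STRICT_MODULE_RWX is enabled, is translated via vmalloc_to_page().
 * Anything else is assumed writable in place and returned as-is.
 */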
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
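
/*
 * Note: the fixmap mapping covers a single page, so a write through the
 * returned address must not straddle a page boundary (it would fault past
 * the mapping). Word-aligned instructions never do; callers writing larger
 * objects must ensure suitable alignment.
 */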

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

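/*
 * Example (illustrative sketch, not part of this file): reading back an
 * instruction before deciding how to patch it.
 *
 *	u32 insn;
 *
 *	if (aarch64_insn_read(addr, &insn))
 *		return -EFAULT;
 *	if (aarch64_insn_is_bl(insn))
 *		... rewrite the branch ...
 */
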
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

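/*
 * Write a 64-bit literal (e.g. a pointer embedded in patched text) rather
 * than an instruction. Marked noinstr so that it remains safe to call
 * while instrumentation itself is being patched.
 */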
noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
{
	u64 *waddr;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &val, sizeof(val));

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		caches_clean_inval_pou((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

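/*
 * Example (illustrative sketch, not part of this file): NOP-ing out a
 * single instruction. "nosync" means only cache maintenance is performed,
 * so the caller must guarantee that other CPUs can safely execute either
 * the old or the new instruction (see the architecture's rules on
 * concurrent modification and execution of instructions).
 *
 *	if (aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop()))
 *		pr_warn("failed to patch %p\n", addr);
 */
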
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* entries in both arrays */
	atomic_t	cpu_count;	/* rendezvous counter, see below */
};

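/*
 * stop_machine() callback, run on every online CPU. Each CPU increments
 * cpu_count; the last one to arrive becomes the patcher while the others
 * spin. Once patching is done, the patcher bumps cpu_count past
 * num_online_cpus(), releasing the spinners, which then issue an ISB so
 * that the updated instructions are fetched cleanly.
 */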
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
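
/*
 * Example (illustrative sketch, not part of this file): atomically
 * redirecting a branch while other CPUs may be executing it.
 * stop_machine() parks every other CPU, so this is safe even for
 * instructions that cannot be modified concurrently with execution.
 *
 *	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)addr,
 *					       (unsigned long)new_target,
 *					       AARCH64_INSN_BRANCH_NOLINK);
 *
 *	if (aarch64_insn_patch_text(&addr, &insn, 1))
 *		pr_warn("failed to patch %p\n", addr);
 */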