xref: /openbmc/linux/arch/riscv/kernel/patch.c (revision b5265c81)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>

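/*
 * Passed to patch_text_cb() on every CPU by stop_machine(): the
 * instruction to write, where to write it, and a counter used to
 * rendezvous the CPUs around the write.
 */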
struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
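/*
 * Kernel text is normally mapped read-only (STRICT_KERNEL_RWX), so the
 * new bytes are written through a temporary writable alias set up in
 * the fixmap area rather than through the text mapping itself.
 */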
static void *patch_map(void *addr, int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

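	/*
	 * Core kernel text lives in the kernel image mapping; module
	 * text is vmalloc'ed. Without STRICT_MODULE_RWX a module
	 * address is already writable and can be used directly.
	 */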
	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}
NOKPROBE_SYMBOL(patch_map);

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * The caller is expected to already hold text_mutex, so no
	 * further locking is taken here; that is what serializes
	 * patching across cores.
	 */
	lockdep_assert_held(&text_mutex);

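	/*
	 * The target may straddle a page boundary, in which case the
	 * second page also needs a writable alias (FIX_TEXT_POKE1);
	 * FIX_TEXT_POKE0 covers the first page.
	 */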
	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

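	/* Write through the alias; probe_kernel_write() fails with -EFAULT rather than faulting. */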
	ret = probe_kernel_write(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return probe_kernel_write(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

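/*
 * Write new instruction(s) and flush the icache for that range.
 * "nosync" because other harts are not brought into sync here: the
 * caller must ensure nothing can execute the range concurrently
 * (e.g. via stop_machine(), as patch_text() below does).
 */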
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
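/*
 * A hypothetical caller sketch (not part of this file): callers must
 * hold text_mutex themselves, e.g.:
 *
 *	u32 nop = 0x00000013;	// RISC-V canonical nop (addi x0, x0, 0)
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text_nosync(addr, &nop, sizeof(nop));
 *	mutex_unlock(&text_mutex);
 */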
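/*
 * Runs on every online CPU under stop_machine(). The first CPU to
 * increment cpu_count performs the write; the rest spin until the
 * counter climbs past num_online_cpus(), so no CPU can resume and
 * execute stale instructions before the write and fence are done.
 */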
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret = patch_text_nosync(patch->addr, &patch->insn,
					GET_INSN_LENGTH(patch->insn));
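		/*
		 * The extra increment pushes the counter past
		 * num_online_cpus() and releases the spinning CPUs.
		 */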
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
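/*
 * Patch one (possibly compressed) instruction, using stop_machine()
 * to keep every other CPU out of the way while the text changes.
 */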
int patch_text(void *addr, u32 insn)
{
	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
}
NOKPROBE_SYMBOL(patch_text);
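/*
 * A hypothetical usage sketch (not part of this file): kprobes-style
 * code could plant a breakpoint with patch_text(), which handles the
 * cross-CPU serialization internally:
 *
 *	patch_text(probe_addr, __BUG_INSN_32);	// 32-bit ebreak
 */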