xref: /openbmc/linux/kernel/bpf/disasm.c (revision d9f6e12f)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */

#include <linux/bpf.h>

#include "disasm.h"

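/*
 * Table of BPF helper names indexed by helper function ID.  Each entry is
 * generated via __BPF_FUNC_MAPPER, mapping e.g. BPF_FUNC_map_lookup_elem to
 * the string "bpf_map_lookup_elem".
 */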
#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN

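/*
 * Resolve the target name of a call instruction: use the helper name table
 * for known helper IDs, fall back to the caller-supplied cb_call callback,
 * and print the raw pc-relative offset for BPF-to-BPF (pseudo) calls.
 */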
static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   char *buff, size_t len)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (insn->src_reg != BPF_PSEUDO_CALL &&
	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
	    func_id_str[insn->imm])
		return func_id_str[insn->imm];

	if (cbs && cbs->cb_call)
		return cbs->cb_call(cbs->private_data, insn);

	if (insn->src_reg == BPF_PSEUDO_CALL)
		snprintf(buff, len, "%+d", insn->imm);

	return buff;
}

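/*
 * Format the 64-bit immediate of an ldimm64 instruction, either through the
 * caller-supplied cb_imm callback or as a plain hexadecimal value.
 */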
static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
				   const struct bpf_insn *insn,
				   u64 full_imm, char *buff, size_t len)
{
	if (cbs && cbs->cb_imm)
		return cbs->cb_imm(cbs->private_data, insn, full_imm);

	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
	return buff;
}

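/* Return the name of a BPF helper by ID, or "unknown" if out of range. */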
const char *func_id_name(int id)
{
	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}

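/*
 * Mnemonic tables used by the printer below.  The ALU and JMP tables are
 * indexed by BPF_OP(code) >> 4, the load/store size table by
 * BPF_SIZE(code) >> 3, and the class table directly by BPF_CLASS(code).
 */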
const char *const bpf_class_string[8] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_JMP32] = "jmp32",
	[BPF_ALU64] = "alu64",
};

const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR  >> 4]  = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_atomic_alu_string[16] = {
	[BPF_ADD >> 4]  = "add",
	[BPF_AND >> 4]  = "and",
	[BPF_OR >> 4]   = "or",
	[BPF_XOR >> 4]  = "xor",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JLT >> 4]  = "<",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JLE >> 4]  = "<=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSLT >> 4] = "s<",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_JSLE >> 4] = "s<=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

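/*
 * Print a BPF_END (endianness conversion) instruction, e.g. output of the
 * form "r1 = le16 r1".
 */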
static void print_bpf_end_insn(bpf_insn_print_t verbose,
			       void *private_data,
			       const struct bpf_insn *insn)
{
	verbose(private_data, "(%02x) r%d = %s%d r%d\n",
		insn->code, insn->dst_reg,
		BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
		insn->imm, insn->dst_reg);
}

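/*
 * Pretty-print a single BPF instruction through the caller's cb_print
 * callback, dispatching on the instruction class.  Map pointers embedded in
 * ldimm64 instructions are printed as 0 unless allow_ptr_leaks is set.
 */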
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
		    const struct bpf_insn *insn,
		    bool allow_ptr_leaks)
{
	const bpf_insn_print_t verbose = cbs->cb_print;
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_OP(insn->code) == BPF_END) {
			if (class == BPF_ALU64)
				verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code);
			else
				print_bpf_end_insn(verbose, cbs->private_data, insn);
		} else if (BPF_OP(insn->code) == BPF_NEG) {
			verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? 'w' : 'r',
				insn->src_reg);
		} else {
			verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
				insn->code, class == BPF_ALU ? 'w' : 'r',
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				insn->imm);
		}
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
			  insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				bpf_alu_string[BPF_OP(insn->imm) >> 4],
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   (insn->imm == (BPF_ADD | BPF_FETCH) ||
			    insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH))) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_CMPXCHG) {
			verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n",
				insn->code,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
			   insn->imm == BPF_XCHG) {
			verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n",
				insn->code, insn->src_reg,
				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off, insn->src_reg);
		} else {
			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
		}
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
			return;
		}
		verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM &&
			   BPF_SIZE(insn->code) == BPF_DW) {
			/* At this point, we already made sure that the second
			 * part of the ldimm64 insn is accessible.
			 */
			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
			bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
				      insn->src_reg == BPF_PSEUDO_MAP_VALUE;
			char tmp[64];

			if (is_ptr && !allow_ptr_leaks)
				imm = 0;

			verbose(cbs->private_data, "(%02x) r%d = %s\n",
				insn->code, insn->dst_reg,
				__func_imm_name(cbs, insn, imm,
						tmp, sizeof(tmp)));
		} else {
			verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP32 || class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			char tmp[64];

			if (insn->src_reg == BPF_PSEUDO_CALL) {
				verbose(cbs->private_data, "(%02x) call pc%s\n",
					insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)));
			} else {
				strcpy(tmp, "unknown");
				verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
					__func_get_name(cbs, insn,
							tmp, sizeof(tmp)),
					insn->imm);
			}
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose(cbs->private_data, "(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose(cbs->private_data, "(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s %c%d goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				class == BPF_JMP32 ? 'w' : 'r',
				insn->src_reg, insn->off);
		} else {
			verbose(cbs->private_data,
				"(%02x) if %c%d %s 0x%x goto pc%+d\n",
				insn->code, class == BPF_JMP32 ? 'w' : 'r',
				insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose(cbs->private_data, "(%02x) %s\n",
			insn->code, bpf_class_string[class]);
	}
}
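/*
 * Illustrative usage sketch (not part of the original file): a caller such
 * as the verifier is expected to fill in struct bpf_insn_cbs with a
 * printf-like cb_print callback and invoke print_bpf_insn() once per
 * instruction.  The example_* identifiers below are hypothetical and shown
 * only to demonstrate the callback wiring; unused callbacks may be left
 * NULL, in which case the fallbacks above are used.
 *
 *	static void example_print_cb(void *private_data, const char *fmt, ...)
 *	{
 *		va_list args;
 *
 *		va_start(args, fmt);
 *		vprintk(fmt, args);
 *		va_end(args);
 *	}
 *
 *	static void example_dump_insn(const struct bpf_insn *insn)
 *	{
 *		const struct bpf_insn_cbs cbs = {
 *			.cb_print	= example_print_cb,
 *			.private_data	= NULL,
 *		};
 *
 *		// false: mask out embedded map pointers (no pointer leaks)
 *		print_bpf_insn(&cbs, insn, false);
 *	}
 */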