// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_INSNS	512
#define MAX_MATCHES	16

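/* One expected verifier-log match: the substring @match must appear in the
 * register state that the verifier prints for instruction index @line.
 */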
struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(id=0,off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=inv2"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv16"},
			{4, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(id=0,off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=inv1"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv1"},
			{6, "R4_w=inv32"},
			{7, "R4_w=inv16"},
			{8, "R4_w=inv8"},
			{9, "R4_w=inv4"},
			{10, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(id=0,off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=inv4"},
			{1, "R3_w=inv8"},
			{2, "R3_w=inv10"},
			{3, "R4_w=inv8"},
			{4, "R4_w=inv12"},
			{5, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(id=0,off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=inv7"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv14"},
			{3, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */
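/* Load skb->data into R2 and skb->data_end into R3 so that subsequent
 * packet-pointer arithmetic can be bounds-checked against the packet end.
 */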
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

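/* Bounds-check that at least 8 bytes of packet data are present (exiting
 * early otherwise), then load a single unknown byte (value in [0, 255])
 * from the start of the packet into DST_REG.
 */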
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
			{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{12, "R3_w=pkt_end(id=0,off=0,imm=0)"},
			{17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{25, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			{12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
			{20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{3, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			{12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>=0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
			/* New unknown value in R7 is (4n), >= 76 */
			{14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
		},
	},
};

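/* Return the number of instructions in @fp by scanning backwards for the
 * last non-zero instruction in the fixed-size array.
 */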
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

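/* Load one test program with BPF_F_STRICT_ALIGNMENT and a verbose verifier
 * log (log_level = 2), check the expected ACCEPT/REJECT outcome, then verify
 * that each expected register-state substring appears in the log on the
 * line for its instruction index.  Returns 0 on success, 1 on failure.
 */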
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.prog_flags = BPF_F_STRICT_ALIGNMENT,
		.log_buf = bpf_vlog,
		.log_size = sizeof(bpf_vlog),
		.log_level = 2,
	);
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				prog, prog_len, &opts);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success loading program!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];
			int tmp;

			if (!m.match)
				break;
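			/* Advance through the log until we reach the state
			 * line for the expected instruction index.  Log lines
			 * are prefixed either with "<idx>: " or, after a
			 * jump, with "from <src> to <idx>: ".
			 */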
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == -1)
					sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			/* Check the next line as well in case the previous line
			 * did not have a corresponding bpf insn. Example:
			 * func#0 @0
			 * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
			 * 0: (b7) r3 = 2                 ; R3_w=inv2
			 */
			if (!strstr(line_ptr, m.match)) {
				cur_line = -1;
				line_ptr = strtok(NULL, "\n");
				if (line_ptr)
					sscanf(line_ptr, "%u: ", &cur_line);
			}
			if (cur_line != m.line || !line_ptr ||
			    !strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

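/* Run each alignment test as its own subtest, keyed by its description. */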
void test_align(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_align_test *test = &tests[i];

		if (!test__start_subtest(test->descr))
			continue;

		CHECK_FAIL(do_test_single(test));
	}
}