/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch: this slow path resolves classic BPF's
 * negative load offsets (the SKF_NET_OFF and SKF_LL_OFF ranges) into
 * a bounds-checked pointer within the skb head.
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
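/* Illustrative example (not part of this file): a classic BPF filter
 * doing "ld [SKF_NET_OFF + 12]" reads byte offset 12 relative to the
 * network header. JITs funnel such negative offsets through the helper
 * above, which returns a pointer inside [skb->head, skb_tail_pointer())
 * or NULL if the access would fall outside the linear skb data.
 */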

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
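/* Call instructions do not embed a 64-bit helper address. Instead the
 * program loader stores the helper's offset relative to __bpf_call_base
 * in insn->imm, so a 32-bit immediate suffices:
 *
 *	insn->imm = helper - __bpf_call_base;
 *	...
 *	BPF_R0 = (__bpf_call_base + insn->imm)(R1, ..., R5);
 *
 * (see the JMP_CALL handler below).
 */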
74 
75 /**
76  *	__bpf_prog_run - run eBPF program on a given context
77  *	@ctx: is the data we are operating on
78  *	@insn: is the array of eBPF instructions
79  *
80  * Decode and execute eBPF instructions.
81  */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
	};
	void *ptr;
	int off;

	/* CONT_JMP is intentionally identical to CONT for now; the separate
	 * name marks taken-branch continuations so they could be tuned
	 * independently later.
	 */
#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

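	/* Direct-threaded dispatch: each opcode byte indexes straight into
	 * jumptable[] and control transfers to its handler via GCC's
	 * computed-goto extension, avoiding a central switch statement and
	 * giving each handler its own indirect branch for the predictor.
	 */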
select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

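	/* Each ALU() instantiation below expands into four handlers:
	 * ALU64_<OP>_X (64-bit, register operand), ALU_<OP>_X (32-bit,
	 * register), ALU64_<OP>_K (64-bit, immediate) and ALU_<OP>_K
	 * (32-bit, immediate).
	 */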
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
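	/* Note on do_div(): it divides its first argument in place and
	 * returns the remainder. Hence the MOD handlers below assign the
	 * returned remainder, while the DIV handlers keep the quotient
	 * that do_div() leaves behind in its first argument.
	 */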
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
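	/* In the BPF_END handlers above, IMM selects the operand width
	 * (16/32/64 bits). On a little-endian host the TO_LE cases reduce
	 * to a truncation to that width, while TO_BE additionally
	 * byte-swaps; the roles are reversed on a big-endian host.
	 */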

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

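	/* Each LDST() instantiation below expands into three handlers for
	 * the given operand size: STX_MEM (store register), ST_MEM (store
	 * immediate) and LDX_MEM (load into register).
	 */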
	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
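	/* BPF_XADD is a fetch-less atomic add: the memory word at
	 * dst_reg + off is updated atomically, but the old value is not
	 * loaded back into src_reg.
	 */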
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns appear only
		 * in programs where ctx == skb. All programs keep 'ctx'
		 * in regs[BPF_REG_CTX] == BPF_R6; bpf_convert_filter()
		 * saves it in BPF_R6, and the internal BPF verifier
		 * checks that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

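/* Illustrative sketch (not part of this file): a minimal eBPF program
 * that the interpreter above could execute, assuming the insn-building
 * macros from <linux/filter.h>:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 42),	// r0 = 42
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 *
 * __bpf_prog_run(ctx, prog) would return 42 for any ctx.
 */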
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}
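/* Arch JITs (e.g. x86, under CONFIG_BPF_JIT) provide a strong definition
 * that overrides this weak stub; on success they point fp->bpf_func at
 * the JITed image, replacing the interpreter selected in
 * bpf_prog_select_runtime() below.
 */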

/**
 *	bpf_prog_select_runtime - select execution runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, fall back
 * to the interpreter. Either way, the program will be executed via the
 * BPF_PROG_RUN() macro.
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

/* free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	bpf_jit_free(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
535