/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

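/*
 * Make the freshly written instructions visible to instruction fetch
 * before the generated program can be executed.
 */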
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

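/*
 * Emit the prologue.  ctx->seen, filled in by the sizing pass, records which
 * features the filter uses, so a stack frame is built and registers are
 * saved only when the body actually needs them.
 */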
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D	 = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to userspace */
		PPC_LI(r_A, 0);
	}
}

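/*
 * Emit the epilogue: tear down whatever the prologue set up, i.e. pop the
 * stack frame and restore the link register and any saved non-volatiles.
 */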
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

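/*
 * Choose which sk_load_* helper to call for an absolute load at constant
 * offset K: positive offsets take the fast positive-offset variant, negative
 * offsets down to SKF_LL_OFF take the negative-offset variant, and anything
 * more negative falls back to the generic helper that validates at run time.
 */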
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
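			/*
			 * A divide (or modulus) by zero makes the filter
			 * return 0, matching the classic BPF interpreter.
			 */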
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MUL(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have state to clean up; otherwise,
			 * with nothing to tidy, just return.  If we /are/ the
			 * last instruction, we're about to fall through to the
			 * epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						type) != 2);
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
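			/*
			 * pkt_type is a bitfield in struct sk_buff: load the
			 * byte containing it, mask off the other bits and
			 * shift the value down to bit 0.
			 */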
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
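			/*
			 * Call the load helper through the link register;
			 * r_addr carries the offset, and the prologue has
			 * already set up r_D/r_HL for it.
			 */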
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, a worst-case jump from start to finish only
	 * fits if each filter instruction assembles to at most 8 bytes (two
	 * PPC instructions), which can't be guaranteed.  So long jumps are
	 * provided, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

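	/*
	 * The start of the allocation holds the ppc64 function descriptor
	 * (entry address + TOC pointer, filled in below); the generated
	 * instructions follow it at code_base.
	 */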
	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we dump code_base rather than image, since the
		 * opcodes live at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}