xref: /openbmc/linux/arch/s390/net/bpf_jit_comp.c (revision 4f3db074)
1 /*
2  * BPF Jit compiler for s390.
3  *
4  * Minimum build requirements:
5  *
6  *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
7  *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
8  *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
9  *  - PACK_STACK
10  *  - 64BIT
11  *
12  * Copyright IBM Corp. 2012,2015
13  *
14  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
15  *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
16  */
17 
18 #define KMSG_COMPONENT "bpf_jit"
19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20 
21 #include <linux/netdevice.h>
22 #include <linux/filter.h>
23 #include <linux/init.h>
24 #include <asm/cacheflush.h>
25 #include <asm/dis.h>
26 #include "bpf_jit.h"
27 
/* Global JIT on/off switch (__read_mostly: written rarely after boot) */
int bpf_jit_enable __read_mostly;
29 
/*
 * JIT state carried across the code generation passes. While prg_buf
 * is NULL the EMIT macros store nothing and only advance the counters,
 * so the same code path first sizes the program and then generates it.
 */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit_start;		/* Start of literal pool */
	int lit;		/* Current position in literal pool */
	int base_ip;		/* Base address for literal pool */
	int ret0_ip;		/* Address of return 0 */
	int exit_ip;		/* Address of exit */
};

#define BPF_SIZE_MAX	4096	/* Max size for program */

/* jit->seen flags: drive prologue/epilogue and stack frame generation */
#define SEEN_SKB	1	/* skb access */
#define SEEN_MEM	2	/* use mem[] for temporary storage */
#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	8	/* code uses literals */
#define SEEN_FUNC	16	/* calls C functions */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)

/*
 * s390 registers
 *
 * Pseudo register numbers past __MAX_BPF_REG; translated to real s390
 * register numbers by reg2hex[] below.
 */
#define REG_W0		(__MAX_BPF_REG+0)	/* Work register 1 (even) */
#define REG_W1		(__MAX_BPF_REG+1)	/* Work register 2 (odd) */
#define REG_SKB_DATA	(__MAX_BPF_REG+2)	/* SKB data register */
#define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
#define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
65 
/*
 * Mapping of BPF registers to s390 registers
 *
 * Note: the REG_0/REG_2/REG_14 aliases above depend on this table
 * (REG_W0 -> %r0, BPF_REG_1 -> %r2, BPF_REG_0 -> %r14).
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* SKB data pointer */
	[REG_SKB_DATA]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
93 
94 static inline u32 reg(u32 dst_reg, u32 src_reg)
95 {
96 	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
97 }
98 
99 static inline u32 reg_high(u32 reg)
100 {
101 	return reg2hex[reg] << 4;
102 }
103 
104 static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
105 {
106 	u32 r1 = reg2hex[b1];
107 
108 	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
109 		jit->seen_reg[r1] = 1;
110 }
111 
/* Record register b1 as used (expects "jit" in scope) */
#define REG_SET_SEEN(b1)					\
({								\
	reg_set_seen(jit, b1);					\
})

/* Non-zero if register b1 was recorded as used */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
118 
/*
 * EMIT macros for code generation
 *
 * All macros expect a local "struct bpf_jit *jit" in scope. While
 * jit->prg_buf is NULL nothing is stored and only jit->prg advances,
 * which lets a first pass compute the program size.
 */

/* Emit a 2 byte instruction */
#define _EMIT2(op)						\
({								\
	if (jit->prg_buf)					\
		*(u16 *) (jit->prg_buf + jit->prg) = op;	\
	jit->prg += 2;						\
})

/* Emit a 2 byte instruction with two register operands (RR format) */
#define EMIT2(op, b1, b2)					\
({								\
	_EMIT2(op | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4 byte instruction */
#define _EMIT4(op)						\
({								\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->prg) = op;	\
	jit->prg += 4;						\
})

/* Emit a 4 byte instruction with two register operands (RRE format) */
#define EMIT4(op, b1, b2)					\
({								\
	_EMIT4(op | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4 byte instruction with three register operands (RRF format) */
#define EMIT4_RRF(op, b1, b2, b3)				\
({								\
	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* Emit a 4 byte instruction with a 12 bit displacement */
#define _EMIT4_DISP(op, disp)					\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT4(op | __disp);					\
})

/* Emit a 4 byte base + displacement instruction with two registers */
#define EMIT4_DISP(op, b1, b2, disp)				\
({								\
	_EMIT4_DISP(op | reg_high(b1) << 16 |			\
		    reg_high(b2) << 8, disp);			\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4 byte instruction with a 16 bit immediate (RI format) */
#define EMIT4_IMM(op, b1, imm)					\
({								\
	unsigned int __imm = (imm) & 0xffff;			\
	_EMIT4(op | reg_high(b1) << 16 | __imm);		\
})

/*
 * Emit a 4 byte pc-relative branch; "pcrel" is a byte offset and is
 * stored in halfwords (hence the >> 1)
 */
#define EMIT4_PCREL(op, pcrel)					\
({								\
	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
	_EMIT4(op | __pcrel);					\
})
185 
/*
 * Emit a 6 byte instruction (4 byte op1 + 2 byte op2). While
 * jit->prg_buf is NULL only jit->prg advances (sizing pass).
 */
#define _EMIT6(op1, op2)					\
({								\
	if (jit->prg_buf) {					\
		*(u32 *) (jit->prg_buf + jit->prg) = op1;	\
		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2;	\
	}							\
	jit->prg += 6;						\
})

/* Emit a 6 byte instruction with a 12 bit displacement */
#define _EMIT6_DISP(op1, op2, disp)				\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT6(op1 | __disp, op2);				\
})

/* Emit a 6 byte instruction with registers and a 12 bit displacement */
#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)			\
({								\
	_EMIT6_DISP(op1 | reg(b1, b2) << 16 |			\
		    reg_high(b3) << 8, op2, disp);		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/*
 * Emit a 6 byte instruction with a 20 bit displacement split into a
 * low (DL, 12 bit) and a high (DH, 8 bit) part as used by the RXY/RSY
 * instruction formats.
 *
 * Note: "disp" is parenthesized so that the (u32) cast applies to the
 * whole macro argument and not only to its first operand (the
 * unparenthesized form was a latent macro-hygiene bug; no current
 * caller passes a compound expression, so behavior is unchanged).
 */
#define _EMIT6_DISP_LH(op1, op2, disp)				\
({								\
	unsigned int __disp_h = ((u32)(disp)) & 0xff000;	\
	unsigned int __disp_l = ((u32)(disp)) & 0x00fff;	\
	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
})

/* Emit a 6 byte instruction with registers and a 20 bit displacement */
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
({								\
	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |		\
		       reg_high(b3) << 8, op2, disp);		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})
225 
/*
 * Emit a 6 byte compare-and-branch relative instruction. "i" is the
 * current BPF instruction index and "off" the BPF jump offset in
 * instructions; the halfword distance to the target is derived from
 * the addrs[] table relative to the end of this 6 byte instruction.
 * Expects a local "addrs" array in scope.
 */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
({								\
	/* Branch instruction needs 6 bytes */			\
	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
	_EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask);	\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 6 byte instruction with a 32 bit immediate (split high/low) */
#define _EMIT6_IMM(op, imm)					\
({								\
	unsigned int __imm = (imm);				\
	_EMIT6(op | (__imm >> 16), __imm & 0xffff);		\
})

/* Emit a 6 byte instruction with register and 32 bit immediate (RIL) */
#define EMIT6_IMM(op, b1, imm)					\
({								\
	_EMIT6_IMM(op | reg_high(b1) << 16, imm);		\
	REG_SET_SEEN(b1);					\
})
246 
/*
 * Reserve a 4 byte literal pool entry for "val" and return its
 * displacement relative to the literal pool base (jit->base_ip)
 */
#define EMIT_CONST_U32(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit - jit->base_ip;				\
	jit->seen |= SEEN_LITERAL;				\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->lit) = (u32) val;	\
	jit->lit += 4;						\
	ret;							\
})

/*
 * Reserve an 8 byte literal pool entry for "val" and return its
 * displacement relative to the literal pool base (jit->base_ip)
 */
#define EMIT_CONST_U64(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit - jit->base_ip;				\
	jit->seen |= SEEN_LITERAL;				\
	if (jit->prg_buf)					\
		*(u64 *) (jit->prg_buf + jit->lit) = (u64) val;	\
	jit->lit += 8;						\
	ret;							\
})

/* Zero extend the 32 bit value in b1 to 64 bit (after 32 bit ALU ops) */
#define EMIT_ZERO(b1)						\
({								\
	/* llgfr %dst,%dst (zero extend to 64 bit) */		\
	EMIT4(0xb9160000, b1, b1);				\
	REG_SET_SEEN(b1);					\
})
275 
/*
 * Fill whole space with illegal instructions
 *
 * Initializes unused parts of the JIT buffer; per the original intent
 * the all-zero pattern does not decode to a valid s390 instruction,
 * so stray execution traps instead of running leftover bytes.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0, size);
}
283 
/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 *
 * The registers go into the register save area of the caller's frame
 * (%r6 at offset 72, each following register 8 bytes higher). This runs
 * before the prologue lowers %r15 by STK_OFF, so unlike restore_regs()
 * no STK_OFF adjustment is applied here.
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = 72 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
298 
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 *
 * At this point %r15 may still be lowered by STK_OFF (done in the
 * prologue when SEEN_STACK is set), so the save area offset must be
 * biased by STK_OFF to reach the same slots save_regs() wrote.
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = 72 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
316 
317 /*
318  * Return first seen register (from start)
319  */
320 static int get_start(struct bpf_jit *jit, int start)
321 {
322 	int i;
323 
324 	for (i = start; i <= 15; i++) {
325 		if (jit->seen_reg[i])
326 			return i;
327 	}
328 	return 0;
329 }
330 
/*
 * Return last seen register (from start) (gap >= 2)
 *
 * A chunk only ends when TWO consecutive registers are unused: a single
 * unused register in the middle is cheaper to keep inside one stmg/lmg
 * range than to split into two instructions. On a gap the register
 * just before it (i - 1) is returned; otherwise the chunk runs to
 * %r15 or %r14 depending on whether %r15 is marked used.
 */
static int get_end(struct bpf_jit *jit, int start)
{
	int i;

	for (i = start; i < 15; i++) {
		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
			return i - 1;
	}
	return jit->seen_reg[15] ? 15 : 14;
}
344 
#define REGS_SAVE	1
#define REGS_RESTORE	0
/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op)
{
	int first, last = 6;

	while (last <= 15) {
		/* First used register at or after "last" */
		first = get_start(jit, last);
		if (!first)
			break;
		/* Extend the chunk until a gap of >= 2 unused registers */
		last = get_end(jit, first + 1);
		if (op == REGS_SAVE)
			save_regs(jit, first, last);
		else
			restore_regs(jit, first, last);
		last++;
	}
}
368 
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit)
{
	/* Save used call-saved registers (%r6..%r15) */
	save_restore_regs(jit, REGS_SAVE);
	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %l,0 (%l is %r11, see reg2hex[]) */
		EMIT2(0x0d00, REG_L, REG_0);
		/* Literal pool displacements are relative to this point */
		jit->base_ip = jit->prg;
	}
	/* Setup stack and backchain */
	if (jit->seen & SEEN_STACK) {
		/* lgr %bfp,%r15 (BPF frame pointer) */
		EMIT4(0xb9040000, BPF_REG_FP, REG_15);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
		if (jit->seen & SEEN_FUNC)
			/* stg %bfp,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
				      REG_15, 152);
	}
	/*
	 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
	 * we store the SKB header length on the stack and the SKB data
	 * pointer in REG_SKB_DATA.
	 */
	if (jit->seen & SEEN_SKB) {
		/* Header length: llgf %w1,<len>(%b1) */
		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
			      offsetof(struct sk_buff, len));
		/* Subtract paged data length: s %w1,<data_len>(%b1) */
		EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
			   offsetof(struct sk_buff, data_len));
		/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
			      STK_OFF_HLEN);
		/* lg %skb_data,data_off(%b1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
			      BPF_REG_1, offsetof(struct sk_buff, data));
	}
	/* BPF compatibility: clear A (%b7) and X (%b8) registers */
	if (REG_SEEN(BPF_REG_7))
		/* lghi %b7,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
	if (REG_SEEN(BPF_REG_8))
		/* lghi %b8,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
}
423 
/*
 * Function epilogue
 *
 * Emits the shared "return 0" target (only when some instruction
 * branches to it), the common exit path that moves the BPF return
 * value %b0 into %r2, the register restore and the return.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
		/* Branch target for the "return 0" error paths */
		jit->ret0_ip = jit->prg;
		/* lghi %b0,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
	}
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers (including %r15 when it was marked used) */
	save_restore_regs(jit, REGS_RESTORE);
	/* br %r14 */
	_EMIT2(0x07fe);
}
443 
444 /*
445  * Compile one eBPF instruction into s390x code
446  */
447 static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
448 {
449 	struct bpf_insn *insn = &fp->insnsi[i];
450 	int jmp_off, last, insn_count = 1;
451 	unsigned int func_addr, mask;
452 	u32 dst_reg = insn->dst_reg;
453 	u32 src_reg = insn->src_reg;
454 	u32 *addrs = jit->addrs;
455 	s32 imm = insn->imm;
456 	s16 off = insn->off;
457 
458 	switch (insn->code) {
459 	/*
460 	 * BPF_MOV
461 	 */
462 	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
463 		/* llgfr %dst,%src */
464 		EMIT4(0xb9160000, dst_reg, src_reg);
465 		break;
466 	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
467 		/* lgr %dst,%src */
468 		EMIT4(0xb9040000, dst_reg, src_reg);
469 		break;
470 	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
471 		/* llilf %dst,imm */
472 		EMIT6_IMM(0xc00f0000, dst_reg, imm);
473 		break;
474 	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
475 		/* lgfi %dst,imm */
476 		EMIT6_IMM(0xc0010000, dst_reg, imm);
477 		break;
478 	/*
479 	 * BPF_LD 64
480 	 */
481 	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
482 	{
483 		/* 16 byte instruction that uses two 'struct bpf_insn' */
484 		u64 imm64;
485 
486 		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
487 		/* lg %dst,<d(imm)>(%l) */
488 		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
489 			      EMIT_CONST_U64(imm64));
490 		insn_count = 2;
491 		break;
492 	}
493 	/*
494 	 * BPF_ADD
495 	 */
496 	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
497 		/* ar %dst,%src */
498 		EMIT2(0x1a00, dst_reg, src_reg);
499 		EMIT_ZERO(dst_reg);
500 		break;
501 	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
502 		/* agr %dst,%src */
503 		EMIT4(0xb9080000, dst_reg, src_reg);
504 		break;
505 	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
506 		if (!imm)
507 			break;
508 		/* alfi %dst,imm */
509 		EMIT6_IMM(0xc20b0000, dst_reg, imm);
510 		EMIT_ZERO(dst_reg);
511 		break;
512 	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
513 		if (!imm)
514 			break;
515 		/* agfi %dst,imm */
516 		EMIT6_IMM(0xc2080000, dst_reg, imm);
517 		break;
518 	/*
519 	 * BPF_SUB
520 	 */
521 	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
522 		/* sr %dst,%src */
523 		EMIT2(0x1b00, dst_reg, src_reg);
524 		EMIT_ZERO(dst_reg);
525 		break;
526 	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
527 		/* sgr %dst,%src */
528 		EMIT4(0xb9090000, dst_reg, src_reg);
529 		break;
530 	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
531 		if (!imm)
532 			break;
533 		/* alfi %dst,-imm */
534 		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
535 		EMIT_ZERO(dst_reg);
536 		break;
537 	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
538 		if (!imm)
539 			break;
540 		/* agfi %dst,-imm */
541 		EMIT6_IMM(0xc2080000, dst_reg, -imm);
542 		break;
543 	/*
544 	 * BPF_MUL
545 	 */
546 	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
547 		/* msr %dst,%src */
548 		EMIT4(0xb2520000, dst_reg, src_reg);
549 		EMIT_ZERO(dst_reg);
550 		break;
551 	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
552 		/* msgr %dst,%src */
553 		EMIT4(0xb90c0000, dst_reg, src_reg);
554 		break;
555 	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
556 		if (imm == 1)
557 			break;
558 		/* msfi %r5,imm */
559 		EMIT6_IMM(0xc2010000, dst_reg, imm);
560 		EMIT_ZERO(dst_reg);
561 		break;
562 	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
563 		if (imm == 1)
564 			break;
565 		/* msgfi %dst,imm */
566 		EMIT6_IMM(0xc2000000, dst_reg, imm);
567 		break;
568 	/*
569 	 * BPF_DIV / BPF_MOD
570 	 */
571 	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
572 	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
573 	{
574 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
575 
576 		jit->seen |= SEEN_RET0;
577 		/* ltr %src,%src (if src == 0 goto fail) */
578 		EMIT2(0x1200, src_reg, src_reg);
579 		/* jz <ret0> */
580 		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
581 		/* lhi %w0,0 */
582 		EMIT4_IMM(0xa7080000, REG_W0, 0);
583 		/* lr %w1,%dst */
584 		EMIT2(0x1800, REG_W1, dst_reg);
585 		/* dlr %w0,%src */
586 		EMIT4(0xb9970000, REG_W0, src_reg);
587 		/* llgfr %dst,%rc */
588 		EMIT4(0xb9160000, dst_reg, rc_reg);
589 		break;
590 	}
591 	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
592 	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
593 	{
594 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
595 
596 		jit->seen |= SEEN_RET0;
597 		/* ltgr %src,%src (if src == 0 goto fail) */
598 		EMIT4(0xb9020000, src_reg, src_reg);
599 		/* jz <ret0> */
600 		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
601 		/* lghi %w0,0 */
602 		EMIT4_IMM(0xa7090000, REG_W0, 0);
603 		/* lgr %w1,%dst */
604 		EMIT4(0xb9040000, REG_W1, dst_reg);
605 		/* llgfr %dst,%src (u32 cast) */
606 		EMIT4(0xb9160000, dst_reg, src_reg);
607 		/* dlgr %w0,%dst */
608 		EMIT4(0xb9870000, REG_W0, dst_reg);
609 		/* lgr %dst,%rc */
610 		EMIT4(0xb9040000, dst_reg, rc_reg);
611 		break;
612 	}
613 	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
614 	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
615 	{
616 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
617 
618 		if (imm == 1) {
619 			if (BPF_OP(insn->code) == BPF_MOD)
620 				/* lhgi %dst,0 */
621 				EMIT4_IMM(0xa7090000, dst_reg, 0);
622 			break;
623 		}
624 		/* lhi %w0,0 */
625 		EMIT4_IMM(0xa7080000, REG_W0, 0);
626 		/* lr %w1,%dst */
627 		EMIT2(0x1800, REG_W1, dst_reg);
628 		/* dl %w0,<d(imm)>(%l) */
629 		EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
630 			      EMIT_CONST_U32(imm));
631 		/* llgfr %dst,%rc */
632 		EMIT4(0xb9160000, dst_reg, rc_reg);
633 		break;
634 	}
635 	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
636 	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
637 	{
638 		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
639 
640 		if (imm == 1) {
641 			if (BPF_OP(insn->code) == BPF_MOD)
642 				/* lhgi %dst,0 */
643 				EMIT4_IMM(0xa7090000, dst_reg, 0);
644 			break;
645 		}
646 		/* lghi %w0,0 */
647 		EMIT4_IMM(0xa7090000, REG_W0, 0);
648 		/* lgr %w1,%dst */
649 		EMIT4(0xb9040000, REG_W1, dst_reg);
650 		/* dlg %w0,<d(imm)>(%l) */
651 		EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
652 			      EMIT_CONST_U64((u32) imm));
653 		/* lgr %dst,%rc */
654 		EMIT4(0xb9040000, dst_reg, rc_reg);
655 		break;
656 	}
657 	/*
658 	 * BPF_AND
659 	 */
660 	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
661 		/* nr %dst,%src */
662 		EMIT2(0x1400, dst_reg, src_reg);
663 		EMIT_ZERO(dst_reg);
664 		break;
665 	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
666 		/* ngr %dst,%src */
667 		EMIT4(0xb9800000, dst_reg, src_reg);
668 		break;
669 	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
670 		/* nilf %dst,imm */
671 		EMIT6_IMM(0xc00b0000, dst_reg, imm);
672 		EMIT_ZERO(dst_reg);
673 		break;
674 	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
675 		/* ng %dst,<d(imm)>(%l) */
676 		EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
677 			      EMIT_CONST_U64(imm));
678 		break;
679 	/*
680 	 * BPF_OR
681 	 */
682 	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
683 		/* or %dst,%src */
684 		EMIT2(0x1600, dst_reg, src_reg);
685 		EMIT_ZERO(dst_reg);
686 		break;
687 	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
688 		/* ogr %dst,%src */
689 		EMIT4(0xb9810000, dst_reg, src_reg);
690 		break;
691 	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
692 		/* oilf %dst,imm */
693 		EMIT6_IMM(0xc00d0000, dst_reg, imm);
694 		EMIT_ZERO(dst_reg);
695 		break;
696 	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
697 		/* og %dst,<d(imm)>(%l) */
698 		EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
699 			      EMIT_CONST_U64(imm));
700 		break;
701 	/*
702 	 * BPF_XOR
703 	 */
704 	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
705 		/* xr %dst,%src */
706 		EMIT2(0x1700, dst_reg, src_reg);
707 		EMIT_ZERO(dst_reg);
708 		break;
709 	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
710 		/* xgr %dst,%src */
711 		EMIT4(0xb9820000, dst_reg, src_reg);
712 		break;
713 	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
714 		if (!imm)
715 			break;
716 		/* xilf %dst,imm */
717 		EMIT6_IMM(0xc0070000, dst_reg, imm);
718 		EMIT_ZERO(dst_reg);
719 		break;
720 	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
721 		/* xg %dst,<d(imm)>(%l) */
722 		EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
723 			      EMIT_CONST_U64(imm));
724 		break;
725 	/*
726 	 * BPF_LSH
727 	 */
728 	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
729 		/* sll %dst,0(%src) */
730 		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
731 		EMIT_ZERO(dst_reg);
732 		break;
733 	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
734 		/* sllg %dst,%dst,0(%src) */
735 		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
736 		break;
737 	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
738 		if (imm == 0)
739 			break;
740 		/* sll %dst,imm(%r0) */
741 		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
742 		EMIT_ZERO(dst_reg);
743 		break;
744 	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
745 		if (imm == 0)
746 			break;
747 		/* sllg %dst,%dst,imm(%r0) */
748 		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
749 		break;
750 	/*
751 	 * BPF_RSH
752 	 */
753 	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
754 		/* srl %dst,0(%src) */
755 		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
756 		EMIT_ZERO(dst_reg);
757 		break;
758 	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
759 		/* srlg %dst,%dst,0(%src) */
760 		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
761 		break;
762 	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
763 		if (imm == 0)
764 			break;
765 		/* srl %dst,imm(%r0) */
766 		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
767 		EMIT_ZERO(dst_reg);
768 		break;
769 	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
770 		if (imm == 0)
771 			break;
772 		/* srlg %dst,%dst,imm(%r0) */
773 		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
774 		break;
775 	/*
776 	 * BPF_ARSH
777 	 */
778 	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
779 		/* srag %dst,%dst,0(%src) */
780 		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
781 		break;
782 	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
783 		if (imm == 0)
784 			break;
785 		/* srag %dst,%dst,imm(%r0) */
786 		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
787 		break;
788 	/*
789 	 * BPF_NEG
790 	 */
791 	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
792 		/* lcr %dst,%dst */
793 		EMIT2(0x1300, dst_reg, dst_reg);
794 		EMIT_ZERO(dst_reg);
795 		break;
796 	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
797 		/* lcgr %dst,%dst */
798 		EMIT4(0xb9130000, dst_reg, dst_reg);
799 		break;
800 	/*
801 	 * BPF_FROM_BE/LE
802 	 */
803 	case BPF_ALU | BPF_END | BPF_FROM_BE:
804 		/* s390 is big endian, therefore only clear high order bytes */
805 		switch (imm) {
806 		case 16: /* dst = (u16) cpu_to_be16(dst) */
807 			/* llghr %dst,%dst */
808 			EMIT4(0xb9850000, dst_reg, dst_reg);
809 			break;
810 		case 32: /* dst = (u32) cpu_to_be32(dst) */
811 			/* llgfr %dst,%dst */
812 			EMIT4(0xb9160000, dst_reg, dst_reg);
813 			break;
814 		case 64: /* dst = (u64) cpu_to_be64(dst) */
815 			break;
816 		}
817 		break;
818 	case BPF_ALU | BPF_END | BPF_FROM_LE:
819 		switch (imm) {
820 		case 16: /* dst = (u16) cpu_to_le16(dst) */
821 			/* lrvr %dst,%dst */
822 			EMIT4(0xb91f0000, dst_reg, dst_reg);
823 			/* srl %dst,16(%r0) */
824 			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
825 			/* llghr %dst,%dst */
826 			EMIT4(0xb9850000, dst_reg, dst_reg);
827 			break;
828 		case 32: /* dst = (u32) cpu_to_le32(dst) */
829 			/* lrvr %dst,%dst */
830 			EMIT4(0xb91f0000, dst_reg, dst_reg);
831 			/* llgfr %dst,%dst */
832 			EMIT4(0xb9160000, dst_reg, dst_reg);
833 			break;
834 		case 64: /* dst = (u64) cpu_to_le64(dst) */
835 			/* lrvgr %dst,%dst */
836 			EMIT4(0xb90f0000, dst_reg, dst_reg);
837 			break;
838 		}
839 		break;
840 	/*
841 	 * BPF_ST(X)
842 	 */
843 	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
844 		/* stcy %src,off(%dst) */
845 		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
846 		jit->seen |= SEEN_MEM;
847 		break;
848 	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
849 		/* sthy %src,off(%dst) */
850 		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
851 		jit->seen |= SEEN_MEM;
852 		break;
853 	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
854 		/* sty %src,off(%dst) */
855 		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
856 		jit->seen |= SEEN_MEM;
857 		break;
858 	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
859 		/* stg %src,off(%dst) */
860 		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
861 		jit->seen |= SEEN_MEM;
862 		break;
863 	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
864 		/* lhi %w0,imm */
865 		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
866 		/* stcy %w0,off(dst) */
867 		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
868 		jit->seen |= SEEN_MEM;
869 		break;
870 	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
871 		/* lhi %w0,imm */
872 		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
873 		/* sthy %w0,off(dst) */
874 		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
875 		jit->seen |= SEEN_MEM;
876 		break;
877 	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
878 		/* llilf %w0,imm  */
879 		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
880 		/* sty %w0,off(%dst) */
881 		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
882 		jit->seen |= SEEN_MEM;
883 		break;
884 	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
885 		/* lgfi %w0,imm */
886 		EMIT6_IMM(0xc0010000, REG_W0, imm);
887 		/* stg %w0,off(%dst) */
888 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
889 		jit->seen |= SEEN_MEM;
890 		break;
891 	/*
892 	 * BPF_STX XADD (atomic_add)
893 	 */
894 	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
895 		/* laal %w0,%src,off(%dst) */
896 		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
897 			      dst_reg, off);
898 		jit->seen |= SEEN_MEM;
899 		break;
900 	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
901 		/* laalg %w0,%src,off(%dst) */
902 		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
903 			      dst_reg, off);
904 		jit->seen |= SEEN_MEM;
905 		break;
906 	/*
907 	 * BPF_LDX
908 	 */
909 	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
910 		/* llgc %dst,0(off,%src) */
911 		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
912 		jit->seen |= SEEN_MEM;
913 		break;
914 	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
915 		/* llgh %dst,0(off,%src) */
916 		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
917 		jit->seen |= SEEN_MEM;
918 		break;
919 	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
920 		/* llgf %dst,off(%src) */
921 		jit->seen |= SEEN_MEM;
922 		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
923 		break;
924 	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
925 		/* lg %dst,0(off,%src) */
926 		jit->seen |= SEEN_MEM;
927 		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
928 		break;
929 	/*
930 	 * BPF_JMP / CALL
931 	 */
932 	case BPF_JMP | BPF_CALL:
933 	{
934 		/*
935 		 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
936 		 */
937 		const u64 func = (u64)__bpf_call_base + imm;
938 
939 		REG_SET_SEEN(BPF_REG_5);
940 		jit->seen |= SEEN_FUNC;
941 		/* lg %w1,<d(imm)>(%l) */
942 		EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
943 			   EMIT_CONST_U64(func));
944 		/* basr %r14,%w1 */
945 		EMIT2(0x0d00, REG_14, REG_W1);
946 		/* lgr %b0,%r2: load return value into %b0 */
947 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
948 		break;
949 	}
950 	case BPF_JMP | BPF_EXIT: /* return b0 */
951 		last = (i == fp->len - 1) ? 1 : 0;
952 		if (last && !(jit->seen & SEEN_RET0))
953 			break;
954 		/* j <exit> */
955 		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
956 		break;
957 	/*
958 	 * Branch relative (number of skipped instructions) to offset on
959 	 * condition.
960 	 *
961 	 * Condition code to mask mapping:
962 	 *
963 	 * CC | Description	   | Mask
964 	 * ------------------------------
965 	 * 0  | Operands equal	   |	8
966 	 * 1  | First operand low  |	4
967 	 * 2  | First operand high |	2
968 	 * 3  | Unused		   |	1
969 	 *
970 	 * For s390x relative branches: ip = ip + off_bytes
971 	 * For BPF relative branches:	insn = insn + off_insns + 1
972 	 *
973 	 * For example for s390x with offset 0 we jump to the branch
974 	 * instruction itself (loop) and for BPF with offset 0 we
975 	 * branch to the instruction behind the branch.
976 	 */
977 	case BPF_JMP | BPF_JA: /* if (true) */
978 		mask = 0xf000; /* j */
979 		goto branch_oc;
980 	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
981 		mask = 0x2000; /* jh */
982 		goto branch_ks;
983 	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
984 		mask = 0xa000; /* jhe */
985 		goto branch_ks;
986 	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
987 		mask = 0x2000; /* jh */
988 		goto branch_ku;
989 	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
990 		mask = 0xa000; /* jhe */
991 		goto branch_ku;
992 	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
993 		mask = 0x7000; /* jne */
994 		goto branch_ku;
995 	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
996 		mask = 0x8000; /* je */
997 		goto branch_ku;
998 	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
999 		mask = 0x7000; /* jnz */
1000 		/* lgfi %w1,imm (load sign extend imm) */
1001 		EMIT6_IMM(0xc0010000, REG_W1, imm);
1002 		/* ngr %w1,%dst */
1003 		EMIT4(0xb9800000, REG_W1, dst_reg);
1004 		goto branch_oc;
1005 
1006 	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1007 		mask = 0x2000; /* jh */
1008 		goto branch_xs;
1009 	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1010 		mask = 0xa000; /* jhe */
1011 		goto branch_xs;
1012 	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1013 		mask = 0x2000; /* jh */
1014 		goto branch_xu;
1015 	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1016 		mask = 0xa000; /* jhe */
1017 		goto branch_xu;
1018 	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1019 		mask = 0x7000; /* jne */
1020 		goto branch_xu;
1021 	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1022 		mask = 0x8000; /* je */
1023 		goto branch_xu;
1024 	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1025 		mask = 0x7000; /* jnz */
1026 		/* ngrk %w1,%dst,%src */
1027 		EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
1028 		goto branch_oc;
1029 branch_ks:
1030 		/* lgfi %w1,imm (load sign extend imm) */
1031 		EMIT6_IMM(0xc0010000, REG_W1, imm);
1032 		/* cgrj %dst,%w1,mask,off */
1033 		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
1034 		break;
1035 branch_ku:
1036 		/* lgfi %w1,imm (load sign extend imm) */
1037 		EMIT6_IMM(0xc0010000, REG_W1, imm);
1038 		/* clgrj %dst,%w1,mask,off */
1039 		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
1040 		break;
1041 branch_xs:
1042 		/* cgrj %dst,%src,mask,off */
1043 		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
1044 		break;
1045 branch_xu:
1046 		/* clgrj %dst,%src,mask,off */
1047 		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
1048 		break;
1049 branch_oc:
1050 		/* brc mask,jmp_off (branch instruction needs 4 bytes) */
1051 		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
1052 		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
1053 		break;
1054 	/*
1055 	 * BPF_LD
1056 	 */
1057 	case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
1058 	case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
1059 		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1060 			func_addr = __pa(sk_load_byte_pos);
1061 		else
1062 			func_addr = __pa(sk_load_byte);
1063 		goto call_fn;
1064 	case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
1065 	case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
1066 		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1067 			func_addr = __pa(sk_load_half_pos);
1068 		else
1069 			func_addr = __pa(sk_load_half);
1070 		goto call_fn;
1071 	case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
1072 	case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
1073 		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1074 			func_addr = __pa(sk_load_word_pos);
1075 		else
1076 			func_addr = __pa(sk_load_word);
1077 		goto call_fn;
1078 call_fn:
1079 		jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
1080 		REG_SET_SEEN(REG_14); /* Return address of possible func call */
1081 
1082 		/*
1083 		 * Implicit input:
1084 		 *  BPF_REG_6	 (R7) : skb pointer
1085 		 *  REG_SKB_DATA (R12): skb data pointer
1086 		 *
1087 		 * Calculated input:
1088 		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
1089 		 *  BPF_REG_5	 (R6) : return address
1090 		 *
1091 		 * Output:
1092 		 *  BPF_REG_0	 (R14): data read from skb
1093 		 *
1094 		 * Scratch registers (BPF_REG_1-5)
1095 		 */
1096 
1097 		/* Call function: llilf %w1,func_addr  */
1098 		EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
1099 
1100 		/* Offset: lgfi %b2,imm */
1101 		EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
1102 		if (BPF_MODE(insn->code) == BPF_IND)
1103 			/* agfr %b2,%src (%src is s32 here) */
1104 			EMIT4(0xb9180000, BPF_REG_2, src_reg);
1105 
1106 		/* basr %b5,%w1 (%b5 is call saved) */
1107 		EMIT2(0x0d00, BPF_REG_5, REG_W1);
1108 
1109 		/*
1110 		 * Note: For fast access we jump directly after the
1111 		 * jnz instruction from bpf_jit.S
1112 		 */
1113 		/* jnz <ret0> */
1114 		EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
1115 		break;
1116 	default: /* too complex, give up */
1117 		pr_err("Unknown opcode %02x\n", insn->code);
1118 		return -1;
1119 	}
1120 	return insn_count;
1121 }
1122 
1123 /*
1124  * Compile eBPF program into s390x code
1125  */
1126 static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1127 {
1128 	int i, insn_count;
1129 
1130 	jit->lit = jit->lit_start;
1131 	jit->prg = 0;
1132 
1133 	bpf_jit_prologue(jit);
1134 	for (i = 0; i < fp->len; i += insn_count) {
1135 		insn_count = bpf_jit_insn(jit, fp, i);
1136 		if (insn_count < 0)
1137 			return -1;
1138 		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
1139 	}
1140 	bpf_jit_epilogue(jit);
1141 
1142 	jit->lit_start = jit->prg;
1143 	jit->size = jit->lit;
1144 	jit->size_prg = jit->prg;
1145 	return 0;
1146 }
1147 
1148 /*
1149  * Classic BPF function stub. BPF programs will be converted into
1150  * eBPF and then bpf_int_jit_compile() will be called.
1151  */
1152 void bpf_jit_compile(struct bpf_prog *fp)
1153 {
1154 }
1155 
1156 /*
1157  * Compile eBPF program "fp"
1158  */
1159 void bpf_int_jit_compile(struct bpf_prog *fp)
1160 {
1161 	struct bpf_binary_header *header;
1162 	struct bpf_jit jit;
1163 	int pass;
1164 
1165 	if (!bpf_jit_enable)
1166 		return;
1167 	memset(&jit, 0, sizeof(jit));
1168 	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1169 	if (jit.addrs == NULL)
1170 		return;
1171 	/*
1172 	 * Three initial passes:
1173 	 *   - 1/2: Determine clobbered registers
1174 	 *   - 3:   Calculate program size and addrs arrray
1175 	 */
1176 	for (pass = 1; pass <= 3; pass++) {
1177 		if (bpf_jit_prog(&jit, fp))
1178 			goto free_addrs;
1179 	}
1180 	/*
1181 	 * Final pass: Allocate and generate program
1182 	 */
1183 	if (jit.size >= BPF_SIZE_MAX)
1184 		goto free_addrs;
1185 	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1186 	if (!header)
1187 		goto free_addrs;
1188 	if (bpf_jit_prog(&jit, fp))
1189 		goto free_addrs;
1190 	if (bpf_jit_enable > 1) {
1191 		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1192 		if (jit.prg_buf)
1193 			print_fn_code(jit.prg_buf, jit.size_prg);
1194 	}
1195 	if (jit.prg_buf) {
1196 		set_memory_ro((unsigned long)header, header->pages);
1197 		fp->bpf_func = (void *) jit.prg_buf;
1198 		fp->jited = true;
1199 	}
1200 free_addrs:
1201 	kfree(jit.addrs);
1202 }
1203 
1204 /*
1205  * Free eBPF program
1206  */
1207 void bpf_jit_free(struct bpf_prog *fp)
1208 {
1209 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1210 	struct bpf_binary_header *header = (void *)addr;
1211 
1212 	if (!fp->jited)
1213 		goto free_filter;
1214 
1215 	set_memory_rw(addr, header->pages);
1216 	bpf_jit_binary_free(header);
1217 
1218 free_filter:
1219 	bpf_prog_unlock_free(fp);
1220 }
1221