1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40
41 #include <asm/barrier.h>
42 #include <asm/unaligned.h>
43
44 /* Registers */
45 #define BPF_R0 regs[BPF_REG_0]
46 #define BPF_R1 regs[BPF_REG_1]
47 #define BPF_R2 regs[BPF_REG_2]
48 #define BPF_R3 regs[BPF_REG_3]
49 #define BPF_R4 regs[BPF_REG_4]
50 #define BPF_R5 regs[BPF_REG_5]
51 #define BPF_R6 regs[BPF_REG_6]
52 #define BPF_R7 regs[BPF_REG_7]
53 #define BPF_R8 regs[BPF_REG_8]
54 #define BPF_R9 regs[BPF_REG_9]
55 #define BPF_R10 regs[BPF_REG_10]
56
57 /* Named registers */
58 #define DST regs[insn->dst_reg]
59 #define SRC regs[insn->src_reg]
60 #define FP regs[BPF_REG_FP]
61 #define AX regs[BPF_REG_AX]
62 #define ARG1 regs[BPF_REG_ARG1]
63 #define CTX regs[BPF_REG_CTX]
64 #define OFF insn->off
65 #define IMM insn->imm
66
67 struct bpf_mem_alloc bpf_global_ma;
68 bool bpf_global_ma_set;
69
70 /* No hurry in this branch
71 *
72 * Exported for the bpf jit load helper.
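*
* Resolves the classic BPF "negative offset" extensions: an offset of
* SKF_NET_OFF + k loads relative to the network header, and SKF_LL_OFF + k
* relative to the link-layer (mac) header.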
73 */
74 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
75 {
76 u8 *ptr = NULL;
77
78 if (k >= SKF_NET_OFF) {
79 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
80 } else if (k >= SKF_LL_OFF) {
81 if (unlikely(!skb_mac_header_was_set(skb)))
82 return NULL;
83 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
84 }
85 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
86 return ptr;
87
88 return NULL;
89 }
90
91 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
92 {
93 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
94 struct bpf_prog_aux *aux;
95 struct bpf_prog *fp;
96
97 size = round_up(size, PAGE_SIZE);
98 fp = __vmalloc(size, gfp_flags);
99 if (fp == NULL)
100 return NULL;
101
102 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
103 if (aux == NULL) {
104 vfree(fp);
105 return NULL;
106 }
107 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
108 if (!fp->active) {
109 vfree(fp);
110 kfree(aux);
111 return NULL;
112 }
113
114 fp->pages = size / PAGE_SIZE;
115 fp->aux = aux;
116 fp->aux->prog = fp;
117 fp->jit_requested = ebpf_jit_enabled();
118 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
119 #ifdef CONFIG_CGROUP_BPF
120 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
121 #endif
122
123 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
124 mutex_init(&fp->aux->used_maps_mutex);
125 mutex_init(&fp->aux->dst_mutex);
126
127 return fp;
128 }
129
130 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
131 {
132 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
133 struct bpf_prog *prog;
134 int cpu;
135
136 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
137 if (!prog)
138 return NULL;
139
140 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
141 if (!prog->stats) {
142 free_percpu(prog->active);
143 kfree(prog->aux);
144 vfree(prog);
145 return NULL;
146 }
147
148 for_each_possible_cpu(cpu) {
149 struct bpf_prog_stats *pstats;
150
151 pstats = per_cpu_ptr(prog->stats, cpu);
152 u64_stats_init(&pstats->syncp);
153 }
154 return prog;
155 }
156 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
157
158 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
159 {
160 if (!prog->aux->nr_linfo || !prog->jit_requested)
161 return 0;
162
163 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
164 sizeof(*prog->aux->jited_linfo),
165 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
166 if (!prog->aux->jited_linfo)
167 return -ENOMEM;
168
169 return 0;
170 }
171
172 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
173 {
174 if (prog->aux->jited_linfo &&
175 (!prog->jited || !prog->aux->jited_linfo[0])) {
176 kvfree(prog->aux->jited_linfo);
177 prog->aux->jited_linfo = NULL;
178 }
179
180 kfree(prog->aux->kfunc_tab);
181 prog->aux->kfunc_tab = NULL;
182 }
183
184 /* The jit engine is responsible to provide an array
185 * for insn_off to the jited_off mapping (insn_to_jit_off).
186 *
187 * The idx to this array is the insn_off. Hence, the insn_off
188 * here is relative to the prog itself instead of the main prog.
189 * This array has one entry for each xlated bpf insn.
190 *
191 * jited_off is the byte off to the end of the jited insn.
192 *
193 * Hence, with
194 * insn_start:
195 * The first bpf insn off of the prog. The insn off
196 * here is relative to the main prog.
197 * e.g. if prog is a subprog, insn_start > 0
198 * linfo_idx:
199 * The prog's idx to prog->aux->linfo and jited_linfo
200 *
201 * jited_linfo[linfo_idx] = prog->bpf_func
202 *
203 * For i > linfo_idx,
204 *
205 * jited_linfo[i] = prog->bpf_func +
206 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
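*
* For example (hypothetical numbers): for a subprog with insn_start = 10,
* a line info entry with insn_off = 14 gets
*
* jited_linfo[i] = prog->bpf_func + insn_to_jit_off[14 - 10 - 1]
*
* i.e. the byte offset just past the jited code of prog-relative insn 3,
* which is where the jited code for insn_off 14 begins.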
207 */
208 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
209 const u32 *insn_to_jit_off)
210 {
211 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
212 const struct bpf_line_info *linfo;
213 void **jited_linfo;
214
215 if (!prog->aux->jited_linfo)
216 /* Userspace did not provide linfo */
217 return;
218
219 linfo_idx = prog->aux->linfo_idx;
220 linfo = &prog->aux->linfo[linfo_idx];
221 insn_start = linfo[0].insn_off;
222 insn_end = insn_start + prog->len;
223
224 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
225 jited_linfo[0] = prog->bpf_func;
226
227 nr_linfo = prog->aux->nr_linfo - linfo_idx;
228
229 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
230 /* The verifier ensures that linfo[i].insn_off is
231 * strictly increasing
232 */
233 jited_linfo[i] = prog->bpf_func +
234 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
235 }
236
237 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
238 gfp_t gfp_extra_flags)
239 {
240 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
241 struct bpf_prog *fp;
242 u32 pages;
243
244 size = round_up(size, PAGE_SIZE);
245 pages = size / PAGE_SIZE;
246 if (pages <= fp_old->pages)
247 return fp_old;
248
249 fp = __vmalloc(size, gfp_flags);
250 if (fp) {
251 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
252 fp->pages = pages;
253 fp->aux->prog = fp;
254
255 /* We keep fp->aux from fp_old around in the new
256 * reallocated structure.
257 */
258 fp_old->aux = NULL;
259 fp_old->stats = NULL;
260 fp_old->active = NULL;
261 __bpf_prog_free(fp_old);
262 }
263
264 return fp;
265 }
266
267 void __bpf_prog_free(struct bpf_prog *fp)
268 {
269 if (fp->aux) {
270 mutex_destroy(&fp->aux->used_maps_mutex);
271 mutex_destroy(&fp->aux->dst_mutex);
272 kfree(fp->aux->poke_tab);
273 kfree(fp->aux);
274 }
275 free_percpu(fp->stats);
276 free_percpu(fp->active);
277 vfree(fp);
278 }
279
280 int bpf_prog_calc_tag(struct bpf_prog *fp)
281 {
282 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
283 u32 raw_size = bpf_prog_tag_scratch_size(fp);
284 u32 digest[SHA1_DIGEST_WORDS];
285 u32 ws[SHA1_WORKSPACE_WORDS];
286 u32 i, bsize, psize, blocks;
287 struct bpf_insn *dst;
288 bool was_ld_map;
289 u8 *raw, *todo;
290 __be32 *result;
291 __be64 *bits;
292
293 raw = vmalloc(raw_size);
294 if (!raw)
295 return -ENOMEM;
296
297 sha1_init(digest);
298 memset(ws, 0, sizeof(ws));
299
300 /* We need to take the map fds out of the digest calculation
301 * since they are unstable from the user space side.
302 */
303 dst = (void *)raw;
304 for (i = 0, was_ld_map = false; i < fp->len; i++) {
305 dst[i] = fp->insnsi[i];
306 if (!was_ld_map &&
307 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
308 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
309 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
310 was_ld_map = true;
311 dst[i].imm = 0;
312 } else if (was_ld_map &&
313 dst[i].code == 0 &&
314 dst[i].dst_reg == 0 &&
315 dst[i].src_reg == 0 &&
316 dst[i].off == 0) {
317 was_ld_map = false;
318 dst[i].imm = 0;
319 } else {
320 was_ld_map = false;
321 }
322 }
323
324 psize = bpf_prog_insn_size(fp);
325 memset(&raw[psize], 0, raw_size - psize);
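/* Standard SHA-1 message padding for what follows: a single 0x80 byte
 * right after the message, zero fill, and the message length in bits
 * stored as a big-endian 64-bit value at the end of the last block. If
 * fewer than 8 bytes are left in the current block, the length spills
 * into one extra block.
 */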
326 raw[psize++] = 0x80;
327
328 bsize = round_up(psize, SHA1_BLOCK_SIZE);
329 blocks = bsize / SHA1_BLOCK_SIZE;
330 todo = raw;
331 if (bsize - psize >= sizeof(__be64)) {
332 bits = (__be64 *)(todo + bsize - sizeof(__be64));
333 } else {
334 bits = (__be64 *)(todo + bsize + bits_offset);
335 blocks++;
336 }
337 *bits = cpu_to_be64((psize - 1) << 3);
338
339 while (blocks--) {
340 sha1_transform(digest, todo, ws);
341 todo += SHA1_BLOCK_SIZE;
342 }
343
344 result = (__force __be32 *)digest;
345 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
346 result[i] = cpu_to_be32(digest[i]);
347 memcpy(fp->tag, result, sizeof(fp->tag));
348
349 vfree(raw);
350 return 0;
351 }
352
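/* When a patchlet of (end_new - end_old) insns replaces a single insn,
 * relative branch targets that cross the patched region must be re-based
 * by that delta. For example (hypothetical numbers): patching one insn at
 * pos 5 with 4 insns gives delta = 3, so a jump at insn 2 whose target
 * was insn 8 needs its imm/off bumped by 3, while branches entirely
 * before or after the patch stay untouched.
 */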
353 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
354 s32 end_new, s32 curr, const bool probe_pass)
355 {
356 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
357 s32 delta = end_new - end_old;
358 s64 imm = insn->imm;
359
360 if (curr < pos && curr + imm + 1 >= end_old)
361 imm += delta;
362 else if (curr >= end_new && curr + imm + 1 < end_new)
363 imm -= delta;
364 if (imm < imm_min || imm > imm_max)
365 return -ERANGE;
366 if (!probe_pass)
367 insn->imm = imm;
368 return 0;
369 }
370
371 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
372 s32 end_new, s32 curr, const bool probe_pass)
373 {
374 s64 off_min, off_max, off;
375 s32 delta = end_new - end_old;
376
377 if (insn->code == (BPF_JMP32 | BPF_JA)) {
378 off = insn->imm;
379 off_min = S32_MIN;
380 off_max = S32_MAX;
381 } else {
382 off = insn->off;
383 off_min = S16_MIN;
384 off_max = S16_MAX;
385 }
386
387 if (curr < pos && curr + off + 1 >= end_old)
388 off += delta;
389 else if (curr >= end_new && curr + off + 1 < end_new)
390 off -= delta;
391 if (off < off_min || off > off_max)
392 return -ERANGE;
393 if (!probe_pass) {
394 if (insn->code == (BPF_JMP32 | BPF_JA))
395 insn->imm = off;
396 else
397 insn->off = off;
398 }
399 return 0;
400 }
401
402 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
403 s32 end_new, const bool probe_pass)
404 {
405 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
406 struct bpf_insn *insn = prog->insnsi;
407 int ret = 0;
408
409 for (i = 0; i < insn_cnt; i++, insn++) {
410 u8 code;
411
412 /* In the probing pass we still operate on the original,
413 * unpatched image in order to check overflows before we
414 * do any other adjustments. Therefore skip the patchlet.
415 */
416 if (probe_pass && i == pos) {
417 i = end_new;
418 insn = prog->insnsi + end_old;
419 }
420 if (bpf_pseudo_func(insn)) {
421 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
422 end_new, i, probe_pass);
423 if (ret)
424 return ret;
425 continue;
426 }
427 code = insn->code;
428 if ((BPF_CLASS(code) != BPF_JMP &&
429 BPF_CLASS(code) != BPF_JMP32) ||
430 BPF_OP(code) == BPF_EXIT)
431 continue;
432 /* Adjust offset of jmps if we cross patch boundaries. */
433 if (BPF_OP(code) == BPF_CALL) {
434 if (insn->src_reg != BPF_PSEUDO_CALL)
435 continue;
436 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
437 end_new, i, probe_pass);
438 } else {
439 ret = bpf_adj_delta_to_off(insn, pos, end_old,
440 end_new, i, probe_pass);
441 }
442 if (ret)
443 break;
444 }
445
446 return ret;
447 }
448
449 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
450 {
451 struct bpf_line_info *linfo;
452 u32 i, nr_linfo;
453
454 nr_linfo = prog->aux->nr_linfo;
455 if (!nr_linfo || !delta)
456 return;
457
458 linfo = prog->aux->linfo;
459
460 for (i = 0; i < nr_linfo; i++)
461 if (off < linfo[i].insn_off)
462 break;
463
464 /* Push all off < linfo[i].insn_off by delta */
465 for (; i < nr_linfo; i++)
466 linfo[i].insn_off += delta;
467 }
468
469 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
470 const struct bpf_insn *patch, u32 len)
471 {
472 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
473 const u32 cnt_max = S16_MAX;
474 struct bpf_prog *prog_adj;
475 int err;
476
477 /* Since our patchlet doesn't expand the image, we're done. */
478 if (insn_delta == 0) {
479 memcpy(prog->insnsi + off, patch, sizeof(*patch));
480 return prog;
481 }
482
483 insn_adj_cnt = prog->len + insn_delta;
484
485 /* Reject anything that would potentially let the insn->off
486 * target overflow when we have excessive program expansions.
487 * We need to probe here before we do any reallocation where
488 * we afterwards may not fail anymore.
489 */
490 if (insn_adj_cnt > cnt_max &&
491 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
492 return ERR_PTR(err);
493
494 /* Several new instructions need to be inserted. Make room
495 * for them. Likely, there's no need for a new allocation as
496 * the last page could have large enough tailroom.
497 */
498 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
499 GFP_USER);
500 if (!prog_adj)
501 return ERR_PTR(-ENOMEM);
502
503 prog_adj->len = insn_adj_cnt;
504
505 /* Patching happens in 3 steps:
506 *
507 * 1) Move over tail of insnsi from next instruction onwards,
508 * so we can patch the single target insn with one or more
509 * new ones (patching is always from 1 to n insns, n > 0).
510 * 2) Inject new instructions at the target location.
511 * 3) Adjust branch offsets if necessary.
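*
* E.g. (hypothetical sizes): patching insn 3 of a 10-insn prog with a
* 4-insn patchlet gives insn_adj_cnt = 13; insns 4..9 move to 7..12, the
* patchlet lands at 3..6, and branches crossing the patch are adjusted
* by insn_delta = 3.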
512 */
513 insn_rest = insn_adj_cnt - off - len;
514
515 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
516 sizeof(*patch) * insn_rest);
517 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
518
519 /* We are guaranteed not to fail at this point, otherwise
520 * the ship has sailed and we cannot revert to the original state. An
521 * overflow cannot happen at this point.
522 */
523 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
524
525 bpf_adj_linfo(prog_adj, off, insn_delta);
526
527 return prog_adj;
528 }
529
530 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
531 {
532 int err;
533
534 /* Branch offsets can't overflow when program is shrinking, no need
535 * to call bpf_adj_branches(..., true) here
536 */
537 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
538 sizeof(struct bpf_insn) * (prog->len - off - cnt));
539 prog->len -= cnt;
540
541 err = bpf_adj_branches(prog, off, off + cnt, off, false);
542 WARN_ON_ONCE(err);
543 return err;
544 }
545
546 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
547 {
548 int i;
549
550 for (i = 0; i < fp->aux->func_cnt; i++)
551 bpf_prog_kallsyms_del(fp->aux->func[i]);
552 }
553
554 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
555 {
556 bpf_prog_kallsyms_del_subprogs(fp);
557 bpf_prog_kallsyms_del(fp);
558 }
559
560 #ifdef CONFIG_BPF_JIT
561 /* All BPF JIT sysctl knobs here. */
562 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
563 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
564 int bpf_jit_harden __read_mostly;
565 long bpf_jit_limit __read_mostly;
566 long bpf_jit_limit_max __read_mostly;
567
568 static void
569 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
570 {
571 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
572
573 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
574 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
575 }
576
577 static void
578 bpf_prog_ksym_set_name(struct bpf_prog *prog)
579 {
580 char *sym = prog->aux->ksym.name;
581 const char *end = sym + KSYM_NAME_LEN;
582 const struct btf_type *type;
583 const char *func_name;
584
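/* The resulting symbol is "bpf_prog_<tag>_<name>", where <tag> is the
 * prog tag in hex, e.g. (hypothetical values)
 * "bpf_prog_8937c25286f655a7_sys_enter_open".
 */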
585 BUILD_BUG_ON(sizeof("bpf_prog_") +
586 sizeof(prog->tag) * 2 +
587 /* name has been null terminated.
588 * We should need +1 for the '_' preceding
589 * the name. However, the null character
590 * is double counted between the name and the
591 * sizeof("bpf_prog_") above, so we omit
592 * the +1 here.
593 */
594 sizeof(prog->aux->name) > KSYM_NAME_LEN);
595
596 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
597 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
598
599 /* prog->aux->name will be ignored if full btf name is available */
600 if (prog->aux->func_info_cnt) {
601 type = btf_type_by_id(prog->aux->btf,
602 prog->aux->func_info[prog->aux->func_idx].type_id);
603 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
604 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
605 return;
606 }
607
608 if (prog->aux->name[0])
609 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
610 else
611 *sym = 0;
612 }
613
614 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
615 {
616 return container_of(n, struct bpf_ksym, tnode)->start;
617 }
618
619 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
620 struct latch_tree_node *b)
621 {
622 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
623 }
624
625 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
626 {
627 unsigned long val = (unsigned long)key;
628 const struct bpf_ksym *ksym;
629
630 ksym = container_of(n, struct bpf_ksym, tnode);
631
632 if (val < ksym->start)
633 return -1;
634 /* Ensure that we detect return addresses as part of the program, when
635 * the final instruction is a call for a program part of the stack
636 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
637 */
638 if (val > ksym->end)
639 return 1;
640
641 return 0;
642 }
643
644 static const struct latch_tree_ops bpf_tree_ops = {
645 .less = bpf_tree_less,
646 .comp = bpf_tree_comp,
647 };
648
649 static DEFINE_SPINLOCK(bpf_lock);
650 static LIST_HEAD(bpf_kallsyms);
651 static struct latch_tree_root bpf_tree __cacheline_aligned;
652
653 void bpf_ksym_add(struct bpf_ksym *ksym)
654 {
655 spin_lock_bh(&bpf_lock);
656 WARN_ON_ONCE(!list_empty(&ksym->lnode));
657 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
658 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
659 spin_unlock_bh(&bpf_lock);
660 }
661
662 static void __bpf_ksym_del(struct bpf_ksym *ksym)
663 {
664 if (list_empty(&ksym->lnode))
665 return;
666
667 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
668 list_del_rcu(&ksym->lnode);
669 }
670
671 void bpf_ksym_del(struct bpf_ksym *ksym)
672 {
673 spin_lock_bh(&bpf_lock);
674 __bpf_ksym_del(ksym);
675 spin_unlock_bh(&bpf_lock);
676 }
677
678 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
679 {
680 return fp->jited && !bpf_prog_was_classic(fp);
681 }
682
683 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
684 {
685 if (!bpf_prog_kallsyms_candidate(fp) ||
686 !bpf_capable())
687 return;
688
689 bpf_prog_ksym_set_addr(fp);
690 bpf_prog_ksym_set_name(fp);
691 fp->aux->ksym.prog = true;
692
693 bpf_ksym_add(&fp->aux->ksym);
694 }
695
696 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
697 {
698 if (!bpf_prog_kallsyms_candidate(fp))
699 return;
700
701 bpf_ksym_del(&fp->aux->ksym);
702 }
703
704 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
705 {
706 struct latch_tree_node *n;
707
708 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
709 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
710 }
711
712 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
713 unsigned long *off, char *sym)
714 {
715 struct bpf_ksym *ksym;
716 char *ret = NULL;
717
718 rcu_read_lock();
719 ksym = bpf_ksym_find(addr);
720 if (ksym) {
721 unsigned long symbol_start = ksym->start;
722 unsigned long symbol_end = ksym->end;
723
724 strncpy(sym, ksym->name, KSYM_NAME_LEN);
725
726 ret = sym;
727 if (size)
728 *size = symbol_end - symbol_start;
729 if (off)
730 *off = addr - symbol_start;
731 }
732 rcu_read_unlock();
733
734 return ret;
735 }
736
737 bool is_bpf_text_address(unsigned long addr)
738 {
739 bool ret;
740
741 rcu_read_lock();
742 ret = bpf_ksym_find(addr) != NULL;
743 rcu_read_unlock();
744
745 return ret;
746 }
747
748 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
749 {
750 struct bpf_ksym *ksym = bpf_ksym_find(addr);
751
752 return ksym && ksym->prog ?
753 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
754 NULL;
755 }
756
757 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
758 {
759 const struct exception_table_entry *e = NULL;
760 struct bpf_prog *prog;
761
762 rcu_read_lock();
763 prog = bpf_prog_ksym_find(addr);
764 if (!prog)
765 goto out;
766 if (!prog->aux->num_exentries)
767 goto out;
768
769 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
770 out:
771 rcu_read_unlock();
772 return e;
773 }
774
775 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
776 char *sym)
777 {
778 struct bpf_ksym *ksym;
779 unsigned int it = 0;
780 int ret = -ERANGE;
781
782 if (!bpf_jit_kallsyms_enabled())
783 return ret;
784
785 rcu_read_lock();
786 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
787 if (it++ != symnum)
788 continue;
789
790 strncpy(sym, ksym->name, KSYM_NAME_LEN);
791
792 *value = ksym->start;
793 *type = BPF_SYM_ELF_TYPE;
794
795 ret = 0;
796 break;
797 }
798 rcu_read_unlock();
799
800 return ret;
801 }
802
803 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
804 struct bpf_jit_poke_descriptor *poke)
805 {
806 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
807 static const u32 poke_tab_max = 1024;
808 u32 slot = prog->aux->size_poke_tab;
809 u32 size = slot + 1;
810
811 if (size > poke_tab_max)
812 return -ENOSPC;
813 if (poke->tailcall_target || poke->tailcall_target_stable ||
814 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
815 return -EINVAL;
816
817 switch (poke->reason) {
818 case BPF_POKE_REASON_TAIL_CALL:
819 if (!poke->tail_call.map)
820 return -EINVAL;
821 break;
822 default:
823 return -EINVAL;
824 }
825
826 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
827 if (!tab)
828 return -ENOMEM;
829
830 memcpy(&tab[slot], poke, sizeof(*poke));
831 prog->aux->size_poke_tab = size;
832 prog->aux->poke_tab = tab;
833
834 return slot;
835 }
836
837 /*
838 * BPF program pack allocator.
839 *
840 * Most BPF programs are pretty small. Allocating a whole page for each
841 * program is sometimes a waste. Many small BPF programs also add pressure
842 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
843 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
844 * to host BPF programs.
845 */
846 #define BPF_PROG_CHUNK_SHIFT 6
847 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
848 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
849
850 struct bpf_prog_pack {
851 struct list_head list;
852 void *ptr;
853 unsigned long bitmap[];
854 };
855
856 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
857 {
858 memset(area, 0, size);
859 }
860
861 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
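/* E.g. with 64-byte chunks, a (hypothetical) 100-byte image rounds up to
 * BPF_PROG_SIZE_TO_NBITS(100) = 2 chunks, i.e. two bits in pack->bitmap.
 */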
862
863 static DEFINE_MUTEX(pack_mutex);
864 static LIST_HEAD(pack_list);
865
866 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
867 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
868 */
869 #ifdef PMD_SIZE
870 /* PMD_SIZE is really big for some archs. It doesn't make sense to
871 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
872 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
873 * greater than or equal to 2MB.
874 */
875 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
876 #else
877 #define BPF_PROG_PACK_SIZE PAGE_SIZE
878 #endif
879
880 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
881
882 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
883 {
884 struct bpf_prog_pack *pack;
885
886 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
887 GFP_KERNEL);
888 if (!pack)
889 return NULL;
890 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
891 if (!pack->ptr) {
892 kfree(pack);
893 return NULL;
894 }
895 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
896 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
897 list_add_tail(&pack->list, &pack_list);
898
899 set_vm_flush_reset_perms(pack->ptr);
900 set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
901 return pack;
902 }
903
904 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
905 {
906 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
907 struct bpf_prog_pack *pack;
908 unsigned long pos;
909 void *ptr = NULL;
910
911 mutex_lock(&pack_mutex);
912 if (size > BPF_PROG_PACK_SIZE) {
913 size = round_up(size, PAGE_SIZE);
914 ptr = bpf_jit_alloc_exec(size);
915 if (ptr) {
916 bpf_fill_ill_insns(ptr, size);
917 set_vm_flush_reset_perms(ptr);
918 set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
919 }
920 goto out;
921 }
922 list_for_each_entry(pack, &pack_list, list) {
923 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
924 nbits, 0);
925 if (pos < BPF_PROG_CHUNK_COUNT)
926 goto found_free_area;
927 }
928
929 pack = alloc_new_pack(bpf_fill_ill_insns);
930 if (!pack)
931 goto out;
932
933 pos = 0;
934
935 found_free_area:
936 bitmap_set(pack->bitmap, pos, nbits);
937 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
938
939 out:
940 mutex_unlock(&pack_mutex);
941 return ptr;
942 }
943
944 void bpf_prog_pack_free(struct bpf_binary_header *hdr)
945 {
946 struct bpf_prog_pack *pack = NULL, *tmp;
947 unsigned int nbits;
948 unsigned long pos;
949
950 mutex_lock(&pack_mutex);
951 if (hdr->size > BPF_PROG_PACK_SIZE) {
952 bpf_jit_free_exec(hdr);
953 goto out;
954 }
955
956 list_for_each_entry(tmp, &pack_list, list) {
957 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
958 pack = tmp;
959 break;
960 }
961 }
962
963 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
964 goto out;
965
966 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
967 pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
968
969 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
970 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
971
972 bitmap_clear(pack->bitmap, pos, nbits);
973 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
974 BPF_PROG_CHUNK_COUNT, 0) == 0) {
975 list_del(&pack->list);
976 bpf_jit_free_exec(pack->ptr);
977 kfree(pack);
978 }
979 out:
980 mutex_unlock(&pack_mutex);
981 }
982
983 static atomic_long_t bpf_jit_current;
984
985 /* Can be overridden by an arch's JIT compiler if it has a custom,
986 * dedicated BPF backend memory area, or if neither of the two
987 * below apply.
988 */
989 u64 __weak bpf_jit_alloc_exec_limit(void)
990 {
991 #if defined(MODULES_VADDR)
992 return MODULES_END - MODULES_VADDR;
993 #else
994 return VMALLOC_END - VMALLOC_START;
995 #endif
996 }
997
998 static int __init bpf_jit_charge_init(void)
999 {
1000 /* Only used as heuristic here to derive limit. */
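/* The default charge limit is half of the executable area, rounded up to
 * a page and clamped to LONG_MAX, e.g. (hypothetically) 512 MiB for a
 * 1 GiB region.
 */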
1001 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1002 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
1003 PAGE_SIZE), LONG_MAX);
1004 return 0;
1005 }
1006 pure_initcall(bpf_jit_charge_init);
1007
1008 int bpf_jit_charge_modmem(u32 size)
1009 {
1010 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1011 if (!bpf_capable()) {
1012 atomic_long_sub(size, &bpf_jit_current);
1013 return -EPERM;
1014 }
1015 }
1016
1017 return 0;
1018 }
1019
1020 void bpf_jit_uncharge_modmem(u32 size)
1021 {
1022 atomic_long_sub(size, &bpf_jit_current);
1023 }
1024
1025 void *__weak bpf_jit_alloc_exec(unsigned long size)
1026 {
1027 return module_alloc(size);
1028 }
1029
1030 void __weak bpf_jit_free_exec(void *addr)
1031 {
1032 module_memfree(addr);
1033 }
1034
1035 struct bpf_binary_header *
1036 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1037 unsigned int alignment,
1038 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1039 {
1040 struct bpf_binary_header *hdr;
1041 u32 size, hole, start;
1042
1043 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1044 alignment > BPF_IMAGE_ALIGNMENT);
1045
1046 /* Most BPF filters are really small, but if some of them
1047 * fill a whole page, allow at least 128 extra bytes to insert a
1048 * random section of illegal instructions.
1049 */
1050 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1051
1052 if (bpf_jit_charge_modmem(size))
1053 return NULL;
1054 hdr = bpf_jit_alloc_exec(size);
1055 if (!hdr) {
1056 bpf_jit_uncharge_modmem(size);
1057 return NULL;
1058 }
1059
1060 /* Fill space with illegal/arch-dep instructions. */
1061 bpf_fill_ill_insns(hdr, size);
1062
1063 hdr->size = size;
1064 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1065 PAGE_SIZE - sizeof(*hdr));
1066 start = get_random_u32_below(hole) & ~(alignment - 1);
1067
1068 /* Leave a random number of instructions before BPF code. */
1069 *image_ptr = &hdr->image[start];
1070
1071 return hdr;
1072 }
1073
1074 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1075 {
1076 u32 size = hdr->size;
1077
1078 bpf_jit_free_exec(hdr);
1079 bpf_jit_uncharge_modmem(size);
1080 }
1081
1082 /* Allocate jit binary from bpf_prog_pack allocator.
1083 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1084 * to the memory. To solve this problem, a RW buffer is also allocated at
1085 * the same time. The JIT engine should calculate offsets based on the
1086 * RO memory address, but write JITed program to the RW buffer. Once the
1087 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1088 * the JITed program to the RO memory.
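*
* A typical arch JIT flow is then roughly (a sketch, not any particular
* arch's code):
*
* ro = bpf_jit_binary_pack_alloc(proglen, &image, align, &rw, &rw_image, fill);
* ...emit insns into rw_image, computing offsets against image...
* err = bpf_jit_binary_pack_finalize(prog, ro, rw);
* (on error, both the RO region and the RW buffer have already been freed)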
1089 */
1090 struct bpf_binary_header *
1091 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1092 unsigned int alignment,
1093 struct bpf_binary_header **rw_header,
1094 u8 **rw_image,
1095 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1096 {
1097 struct bpf_binary_header *ro_header;
1098 u32 size, hole, start;
1099
1100 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1101 alignment > BPF_IMAGE_ALIGNMENT);
1102
1103 /* add 16 bytes for a random section of illegal instructions */
1104 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1105
1106 if (bpf_jit_charge_modmem(size))
1107 return NULL;
1108 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1109 if (!ro_header) {
1110 bpf_jit_uncharge_modmem(size);
1111 return NULL;
1112 }
1113
1114 *rw_header = kvmalloc(size, GFP_KERNEL);
1115 if (!*rw_header) {
1116 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1117 bpf_prog_pack_free(ro_header);
1118 bpf_jit_uncharge_modmem(size);
1119 return NULL;
1120 }
1121
1122 /* Fill space with illegal/arch-dep instructions. */
1123 bpf_fill_ill_insns(*rw_header, size);
1124 (*rw_header)->size = size;
1125
1126 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1127 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1128 start = get_random_u32_below(hole) & ~(alignment - 1);
1129
1130 *image_ptr = &ro_header->image[start];
1131 *rw_image = &(*rw_header)->image[start];
1132
1133 return ro_header;
1134 }
1135
1136 /* Copy JITed text from rw_header to its final location, the ro_header. */
1137 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1138 struct bpf_binary_header *ro_header,
1139 struct bpf_binary_header *rw_header)
1140 {
1141 void *ptr;
1142
1143 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1144
1145 kvfree(rw_header);
1146
1147 if (IS_ERR(ptr)) {
1148 bpf_prog_pack_free(ro_header);
1149 return PTR_ERR(ptr);
1150 }
1151 return 0;
1152 }
1153
1154 /* bpf_jit_binary_pack_free is called in two different scenarios:
1155 * 1) when the program is freed after a successful JIT;
1156 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1157 * For case 2), we need to free both the RO memory and the RW buffer.
1158 *
1159 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1160 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1161 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1162 * bpf_arch_text_copy (when jit fails).
1163 */
1164 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1165 struct bpf_binary_header *rw_header)
1166 {
1167 u32 size = ro_header->size;
1168
1169 bpf_prog_pack_free(ro_header);
1170 kvfree(rw_header);
1171 bpf_jit_uncharge_modmem(size);
1172 }
1173
1174 struct bpf_binary_header *
1175 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1176 {
1177 unsigned long real_start = (unsigned long)fp->bpf_func;
1178 unsigned long addr;
1179
1180 addr = real_start & BPF_PROG_CHUNK_MASK;
1181 return (void *)addr;
1182 }
1183
1184 static inline struct bpf_binary_header *
1185 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1186 {
1187 unsigned long real_start = (unsigned long)fp->bpf_func;
1188 unsigned long addr;
1189
1190 addr = real_start & PAGE_MASK;
1191 return (void *)addr;
1192 }
1193
1194 /* This symbol is only overridden by archs that have different
1195 * requirements than the usual eBPF JITs, f.e. when they only
1196 * implement cBPF JIT, do not set images read-only, etc.
1197 */
1198 void __weak bpf_jit_free(struct bpf_prog *fp)
1199 {
1200 if (fp->jited) {
1201 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1202
1203 bpf_jit_binary_free(hdr);
1204 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1205 }
1206
1207 bpf_prog_unlock_free(fp);
1208 }
1209
1210 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1211 const struct bpf_insn *insn, bool extra_pass,
1212 u64 *func_addr, bool *func_addr_fixed)
1213 {
1214 s16 off = insn->off;
1215 s32 imm = insn->imm;
1216 u8 *addr;
1217 int err;
1218
1219 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1220 if (!*func_addr_fixed) {
1221 /* Place-holder address till the last pass has collected
1222 * all addresses for JITed subprograms in which case we
1223 * can pick them up from prog->aux.
1224 */
1225 if (!extra_pass)
1226 addr = NULL;
1227 else if (prog->aux->func &&
1228 off >= 0 && off < prog->aux->func_cnt)
1229 addr = (u8 *)prog->aux->func[off]->bpf_func;
1230 else
1231 return -EINVAL;
1232 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1233 bpf_jit_supports_far_kfunc_call()) {
1234 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1235 if (err)
1236 return err;
1237 } else {
1238 /* Address of a BPF helper call. Since part of the core
1239 * kernel, it's always at a fixed location. __bpf_call_base
1240 * and the helper with imm relative to it are both in core
1241 * kernel.
1242 */
1243 addr = (u8 *)__bpf_call_base + imm;
1244 }
1245
1246 *func_addr = (unsigned long)addr;
1247 return 0;
1248 }
1249
1250 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1251 const struct bpf_insn *aux,
1252 struct bpf_insn *to_buff,
1253 bool emit_zext)
1254 {
1255 struct bpf_insn *to = to_buff;
1256 u32 imm_rnd = get_random_u32();
1257 s16 off;
1258
1259 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1260 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1261
1262 /* Constraints on AX register:
1263 *
1264 * AX register is inaccessible from user space. It is mapped in
1265 * all JITs, and used here for constant blinding rewrites. It is
1266 * typically "stateless" meaning its contents are only valid within
1267 * the executed instruction, but not across several instructions.
1268 * There are a few exceptions however which are further detailed
1269 * below.
1270 *
1271 * Constant blinding is only used by JITs, not in the interpreter.
1272 * The interpreter uses AX in some occasions as a local temporary
1273 * register e.g. in DIV or MOD instructions.
1274 *
1275 * In restricted circumstances, the verifier can also use the AX
1276 * register for rewrites as long as they do not interfere with
1277 * the above cases!
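*
* The blinding rewrite below replaces a constant K roughly with:
*
* MOV AX, K ^ imm_rnd
* XOR AX, imm_rnd
* <orig op> DST, AX
*
* so the user-supplied constant never appears verbatim in the JITed image.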
1278 */
1279 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1280 goto out;
1281
1282 if (from->imm == 0 &&
1283 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1284 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1285 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1286 goto out;
1287 }
1288
1289 switch (from->code) {
1290 case BPF_ALU | BPF_ADD | BPF_K:
1291 case BPF_ALU | BPF_SUB | BPF_K:
1292 case BPF_ALU | BPF_AND | BPF_K:
1293 case BPF_ALU | BPF_OR | BPF_K:
1294 case BPF_ALU | BPF_XOR | BPF_K:
1295 case BPF_ALU | BPF_MUL | BPF_K:
1296 case BPF_ALU | BPF_MOV | BPF_K:
1297 case BPF_ALU | BPF_DIV | BPF_K:
1298 case BPF_ALU | BPF_MOD | BPF_K:
1299 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1300 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1301 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1302 break;
1303
1304 case BPF_ALU64 | BPF_ADD | BPF_K:
1305 case BPF_ALU64 | BPF_SUB | BPF_K:
1306 case BPF_ALU64 | BPF_AND | BPF_K:
1307 case BPF_ALU64 | BPF_OR | BPF_K:
1308 case BPF_ALU64 | BPF_XOR | BPF_K:
1309 case BPF_ALU64 | BPF_MUL | BPF_K:
1310 case BPF_ALU64 | BPF_MOV | BPF_K:
1311 case BPF_ALU64 | BPF_DIV | BPF_K:
1312 case BPF_ALU64 | BPF_MOD | BPF_K:
1313 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1314 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1315 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1316 break;
1317
1318 case BPF_JMP | BPF_JEQ | BPF_K:
1319 case BPF_JMP | BPF_JNE | BPF_K:
1320 case BPF_JMP | BPF_JGT | BPF_K:
1321 case BPF_JMP | BPF_JLT | BPF_K:
1322 case BPF_JMP | BPF_JGE | BPF_K:
1323 case BPF_JMP | BPF_JLE | BPF_K:
1324 case BPF_JMP | BPF_JSGT | BPF_K:
1325 case BPF_JMP | BPF_JSLT | BPF_K:
1326 case BPF_JMP | BPF_JSGE | BPF_K:
1327 case BPF_JMP | BPF_JSLE | BPF_K:
1328 case BPF_JMP | BPF_JSET | BPF_K:
1329 /* Accommodate for extra offset in case of a backjump. */
1330 off = from->off;
1331 if (off < 0)
1332 off -= 2;
1333 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1334 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1335 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1336 break;
1337
1338 case BPF_JMP32 | BPF_JEQ | BPF_K:
1339 case BPF_JMP32 | BPF_JNE | BPF_K:
1340 case BPF_JMP32 | BPF_JGT | BPF_K:
1341 case BPF_JMP32 | BPF_JLT | BPF_K:
1342 case BPF_JMP32 | BPF_JGE | BPF_K:
1343 case BPF_JMP32 | BPF_JLE | BPF_K:
1344 case BPF_JMP32 | BPF_JSGT | BPF_K:
1345 case BPF_JMP32 | BPF_JSLT | BPF_K:
1346 case BPF_JMP32 | BPF_JSGE | BPF_K:
1347 case BPF_JMP32 | BPF_JSLE | BPF_K:
1348 case BPF_JMP32 | BPF_JSET | BPF_K:
1349 /* Accommodate for extra offset in case of a backjump. */
1350 off = from->off;
1351 if (off < 0)
1352 off -= 2;
1353 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1354 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1355 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1356 off);
1357 break;
1358
1359 case BPF_LD | BPF_IMM | BPF_DW:
1360 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1361 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1362 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1363 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1364 break;
1365 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1366 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1367 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1368 if (emit_zext)
1369 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1370 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1371 break;
1372
1373 case BPF_ST | BPF_MEM | BPF_DW:
1374 case BPF_ST | BPF_MEM | BPF_W:
1375 case BPF_ST | BPF_MEM | BPF_H:
1376 case BPF_ST | BPF_MEM | BPF_B:
1377 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1378 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1379 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1380 break;
1381 }
1382 out:
1383 return to - to_buff;
1384 }
1385
1386 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1387 gfp_t gfp_extra_flags)
1388 {
1389 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1390 struct bpf_prog *fp;
1391
1392 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1393 if (fp != NULL) {
1394 /* aux->prog still points to the fp_other one, so
1395 * when promoting the clone to the real program,
1396 * this still needs to be adapted.
1397 */
1398 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1399 }
1400
1401 return fp;
1402 }
1403
1404 static void bpf_prog_clone_free(struct bpf_prog *fp)
1405 {
1406 /* aux was stolen by the other clone, so we cannot free
1407 * it from this path! It will be freed eventually by the
1408 * other program on release.
1409 *
1410 * At this point, we don't need a deferred release since
1411 * clone is guaranteed to not be locked.
1412 */
1413 fp->aux = NULL;
1414 fp->stats = NULL;
1415 fp->active = NULL;
1416 __bpf_prog_free(fp);
1417 }
1418
1419 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1420 {
1421 /* We have to repoint aux->prog to self, as we don't
1422 * know whether fp here is the clone or the original.
1423 */
1424 fp->aux->prog = fp;
1425 bpf_prog_clone_free(fp_other);
1426 }
1427
1428 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1429 {
1430 struct bpf_insn insn_buff[16], aux[2];
1431 struct bpf_prog *clone, *tmp;
1432 int insn_delta, insn_cnt;
1433 struct bpf_insn *insn;
1434 int i, rewritten;
1435
1436 if (!prog->blinding_requested || prog->blinded)
1437 return prog;
1438
1439 clone = bpf_prog_clone_create(prog, GFP_USER);
1440 if (!clone)
1441 return ERR_PTR(-ENOMEM);
1442
1443 insn_cnt = clone->len;
1444 insn = clone->insnsi;
1445
1446 for (i = 0; i < insn_cnt; i++, insn++) {
1447 if (bpf_pseudo_func(insn)) {
1448 /* ld_imm64 with an address of bpf subprog is not
1449 * a user controlled constant. Don't randomize it,
1450 * since it will conflict with jit_subprogs() logic.
1451 */
1452 insn++;
1453 i++;
1454 continue;
1455 }
1456
1457 /* We temporarily need to hold the original ld64 insn
1458 * so that we can still access the first part in the
1459 * second blinding run.
1460 */
1461 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1462 insn[1].code == 0)
1463 memcpy(aux, insn, sizeof(aux));
1464
1465 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1466 clone->aux->verifier_zext);
1467 if (!rewritten)
1468 continue;
1469
1470 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1471 if (IS_ERR(tmp)) {
1472 /* Patching may have repointed aux->prog during
1473 * realloc from the original one, so we need to
1474 * fix it up here on error.
1475 */
1476 bpf_jit_prog_release_other(prog, clone);
1477 return tmp;
1478 }
1479
1480 clone = tmp;
1481 insn_delta = rewritten - 1;
1482
1483 /* Walk new program and skip insns we just inserted. */
1484 insn = clone->insnsi + i + insn_delta;
1485 insn_cnt += insn_delta;
1486 i += insn_delta;
1487 }
1488
1489 clone->blinded = 1;
1490 return clone;
1491 }
1492 #endif /* CONFIG_BPF_JIT */
1493
1494 /* Base function for offset calculation. Needs to go into .text section,
1495 * therefore keeping it non-static as well; will also be used by JITs
1496 * anyway later on, so do not let the compiler omit it. This also needs
1497 * to go into kallsyms for correlation from e.g. bpftool, so naming
1498 * must not change.
1499 */
1500 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1501 {
1502 return 0;
1503 }
1504 EXPORT_SYMBOL_GPL(__bpf_call_base);
1505
1506 /* All UAPI available opcodes. */
1507 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1508 /* 32 bit ALU operations. */ \
1509 /* Register based. */ \
1510 INSN_3(ALU, ADD, X), \
1511 INSN_3(ALU, SUB, X), \
1512 INSN_3(ALU, AND, X), \
1513 INSN_3(ALU, OR, X), \
1514 INSN_3(ALU, LSH, X), \
1515 INSN_3(ALU, RSH, X), \
1516 INSN_3(ALU, XOR, X), \
1517 INSN_3(ALU, MUL, X), \
1518 INSN_3(ALU, MOV, X), \
1519 INSN_3(ALU, ARSH, X), \
1520 INSN_3(ALU, DIV, X), \
1521 INSN_3(ALU, MOD, X), \
1522 INSN_2(ALU, NEG), \
1523 INSN_3(ALU, END, TO_BE), \
1524 INSN_3(ALU, END, TO_LE), \
1525 /* Immediate based. */ \
1526 INSN_3(ALU, ADD, K), \
1527 INSN_3(ALU, SUB, K), \
1528 INSN_3(ALU, AND, K), \
1529 INSN_3(ALU, OR, K), \
1530 INSN_3(ALU, LSH, K), \
1531 INSN_3(ALU, RSH, K), \
1532 INSN_3(ALU, XOR, K), \
1533 INSN_3(ALU, MUL, K), \
1534 INSN_3(ALU, MOV, K), \
1535 INSN_3(ALU, ARSH, K), \
1536 INSN_3(ALU, DIV, K), \
1537 INSN_3(ALU, MOD, K), \
1538 /* 64 bit ALU operations. */ \
1539 /* Register based. */ \
1540 INSN_3(ALU64, ADD, X), \
1541 INSN_3(ALU64, SUB, X), \
1542 INSN_3(ALU64, AND, X), \
1543 INSN_3(ALU64, OR, X), \
1544 INSN_3(ALU64, LSH, X), \
1545 INSN_3(ALU64, RSH, X), \
1546 INSN_3(ALU64, XOR, X), \
1547 INSN_3(ALU64, MUL, X), \
1548 INSN_3(ALU64, MOV, X), \
1549 INSN_3(ALU64, ARSH, X), \
1550 INSN_3(ALU64, DIV, X), \
1551 INSN_3(ALU64, MOD, X), \
1552 INSN_2(ALU64, NEG), \
1553 INSN_3(ALU64, END, TO_LE), \
1554 /* Immediate based. */ \
1555 INSN_3(ALU64, ADD, K), \
1556 INSN_3(ALU64, SUB, K), \
1557 INSN_3(ALU64, AND, K), \
1558 INSN_3(ALU64, OR, K), \
1559 INSN_3(ALU64, LSH, K), \
1560 INSN_3(ALU64, RSH, K), \
1561 INSN_3(ALU64, XOR, K), \
1562 INSN_3(ALU64, MUL, K), \
1563 INSN_3(ALU64, MOV, K), \
1564 INSN_3(ALU64, ARSH, K), \
1565 INSN_3(ALU64, DIV, K), \
1566 INSN_3(ALU64, MOD, K), \
1567 /* Call instruction. */ \
1568 INSN_2(JMP, CALL), \
1569 /* Exit instruction. */ \
1570 INSN_2(JMP, EXIT), \
1571 /* 32-bit Jump instructions. */ \
1572 /* Register based. */ \
1573 INSN_3(JMP32, JEQ, X), \
1574 INSN_3(JMP32, JNE, X), \
1575 INSN_3(JMP32, JGT, X), \
1576 INSN_3(JMP32, JLT, X), \
1577 INSN_3(JMP32, JGE, X), \
1578 INSN_3(JMP32, JLE, X), \
1579 INSN_3(JMP32, JSGT, X), \
1580 INSN_3(JMP32, JSLT, X), \
1581 INSN_3(JMP32, JSGE, X), \
1582 INSN_3(JMP32, JSLE, X), \
1583 INSN_3(JMP32, JSET, X), \
1584 /* Immediate based. */ \
1585 INSN_3(JMP32, JEQ, K), \
1586 INSN_3(JMP32, JNE, K), \
1587 INSN_3(JMP32, JGT, K), \
1588 INSN_3(JMP32, JLT, K), \
1589 INSN_3(JMP32, JGE, K), \
1590 INSN_3(JMP32, JLE, K), \
1591 INSN_3(JMP32, JSGT, K), \
1592 INSN_3(JMP32, JSLT, K), \
1593 INSN_3(JMP32, JSGE, K), \
1594 INSN_3(JMP32, JSLE, K), \
1595 INSN_3(JMP32, JSET, K), \
1596 /* Jump instructions. */ \
1597 /* Register based. */ \
1598 INSN_3(JMP, JEQ, X), \
1599 INSN_3(JMP, JNE, X), \
1600 INSN_3(JMP, JGT, X), \
1601 INSN_3(JMP, JLT, X), \
1602 INSN_3(JMP, JGE, X), \
1603 INSN_3(JMP, JLE, X), \
1604 INSN_3(JMP, JSGT, X), \
1605 INSN_3(JMP, JSLT, X), \
1606 INSN_3(JMP, JSGE, X), \
1607 INSN_3(JMP, JSLE, X), \
1608 INSN_3(JMP, JSET, X), \
1609 /* Immediate based. */ \
1610 INSN_3(JMP, JEQ, K), \
1611 INSN_3(JMP, JNE, K), \
1612 INSN_3(JMP, JGT, K), \
1613 INSN_3(JMP, JLT, K), \
1614 INSN_3(JMP, JGE, K), \
1615 INSN_3(JMP, JLE, K), \
1616 INSN_3(JMP, JSGT, K), \
1617 INSN_3(JMP, JSLT, K), \
1618 INSN_3(JMP, JSGE, K), \
1619 INSN_3(JMP, JSLE, K), \
1620 INSN_3(JMP, JSET, K), \
1621 INSN_2(JMP, JA), \
1622 INSN_2(JMP32, JA), \
1623 /* Store instructions. */ \
1624 /* Register based. */ \
1625 INSN_3(STX, MEM, B), \
1626 INSN_3(STX, MEM, H), \
1627 INSN_3(STX, MEM, W), \
1628 INSN_3(STX, MEM, DW), \
1629 INSN_3(STX, ATOMIC, W), \
1630 INSN_3(STX, ATOMIC, DW), \
1631 /* Immediate based. */ \
1632 INSN_3(ST, MEM, B), \
1633 INSN_3(ST, MEM, H), \
1634 INSN_3(ST, MEM, W), \
1635 INSN_3(ST, MEM, DW), \
1636 /* Load instructions. */ \
1637 /* Register based. */ \
1638 INSN_3(LDX, MEM, B), \
1639 INSN_3(LDX, MEM, H), \
1640 INSN_3(LDX, MEM, W), \
1641 INSN_3(LDX, MEM, DW), \
1642 INSN_3(LDX, MEMSX, B), \
1643 INSN_3(LDX, MEMSX, H), \
1644 INSN_3(LDX, MEMSX, W), \
1645 /* Immediate based. */ \
1646 INSN_3(LD, IMM, DW)
1647
1648 bool bpf_opcode_in_insntable(u8 code)
1649 {
1650 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1651 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1652 static const bool public_insntable[256] = {
1653 [0 ... 255] = false,
1654 /* Now overwrite non-defaults ... */
1655 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1656 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1657 [BPF_LD | BPF_ABS | BPF_B] = true,
1658 [BPF_LD | BPF_ABS | BPF_H] = true,
1659 [BPF_LD | BPF_ABS | BPF_W] = true,
1660 [BPF_LD | BPF_IND | BPF_B] = true,
1661 [BPF_LD | BPF_IND | BPF_H] = true,
1662 [BPF_LD | BPF_IND | BPF_W] = true,
1663 };
1664 #undef BPF_INSN_3_TBL
1665 #undef BPF_INSN_2_TBL
1666 return public_insntable[code];
1667 }
1668
1669 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1670 /**
1671 * ___bpf_prog_run - run eBPF program on a given context
1672 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1673 * @insn: is the array of eBPF instructions
1674 *
1675 * Decode and execute eBPF instructions.
1676 *
1677 * Return: whatever value is in %BPF_R0 at program exit
1678 */
1679 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1680 {
1681 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1682 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1683 static const void * const jumptable[256] __annotate_jump_table = {
1684 [0 ... 255] = &&default_label,
1685 /* Now overwrite non-defaults ... */
1686 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1687 /* Non-UAPI available opcodes. */
1688 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1689 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1690 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1691 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1692 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1693 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1694 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1695 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1696 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1697 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1698 };
1699 #undef BPF_INSN_3_LBL
1700 #undef BPF_INSN_2_LBL
1701 u32 tail_call_cnt = 0;
1702
1703 #define CONT ({ insn++; goto select_insn; })
1704 #define CONT_JMP ({ insn++; goto select_insn; })
1705
1706 select_insn:
1707 goto *jumptable[insn->code];
1708
1709 /* Explicitly mask the register-based shift amounts with 63 or 31
1710 * to avoid undefined behavior. Normally this won't affect the
1711 * generated code, for example, in case of native 64 bit archs such
1712 * as x86-64 or arm64, the compiler is optimizing the AND away for
1713 * the interpreter. In case of JITs, each of the JIT backends compiles
1714 * the BPF shift operations to machine instructions which produce
1715 * implementation-defined results in such a case; the resulting
1716 * contents of the register may be arbitrary, but program behaviour
1717 * as a whole remains defined. In other words, in case of JIT backends,
1718 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
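*
* E.g. a 32-bit LSH with a (hypothetical) source value of 70 shifts by
* 70 & 31 = 6 in the interpreter, whereas a JIT backend may leave an
* arbitrary value in the destination register for that input.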
1719 */
1720 /* ALU (shifts) */
1721 #define SHT(OPCODE, OP) \
1722 ALU64_##OPCODE##_X: \
1723 DST = DST OP (SRC & 63); \
1724 CONT; \
1725 ALU_##OPCODE##_X: \
1726 DST = (u32) DST OP ((u32) SRC & 31); \
1727 CONT; \
1728 ALU64_##OPCODE##_K: \
1729 DST = DST OP IMM; \
1730 CONT; \
1731 ALU_##OPCODE##_K: \
1732 DST = (u32) DST OP (u32) IMM; \
1733 CONT;
1734 /* ALU (rest) */
1735 #define ALU(OPCODE, OP) \
1736 ALU64_##OPCODE##_X: \
1737 DST = DST OP SRC; \
1738 CONT; \
1739 ALU_##OPCODE##_X: \
1740 DST = (u32) DST OP (u32) SRC; \
1741 CONT; \
1742 ALU64_##OPCODE##_K: \
1743 DST = DST OP IMM; \
1744 CONT; \
1745 ALU_##OPCODE##_K: \
1746 DST = (u32) DST OP (u32) IMM; \
1747 CONT;
1748 ALU(ADD, +)
1749 ALU(SUB, -)
1750 ALU(AND, &)
1751 ALU(OR, |)
1752 ALU(XOR, ^)
1753 ALU(MUL, *)
1754 SHT(LSH, <<)
1755 SHT(RSH, >>)
1756 #undef SHT
1757 #undef ALU
1758 ALU_NEG:
1759 DST = (u32) -DST;
1760 CONT;
1761 ALU64_NEG:
1762 DST = -DST;
1763 CONT;
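/* MOV with a non-zero insn->off encodes the sign-extending move
 * (BPF_MOVSX); OFF selects the source width in bits.
 */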
1764 ALU_MOV_X:
1765 switch (OFF) {
1766 case 0:
1767 DST = (u32) SRC;
1768 break;
1769 case 8:
1770 DST = (u32)(s8) SRC;
1771 break;
1772 case 16:
1773 DST = (u32)(s16) SRC;
1774 break;
1775 }
1776 CONT;
1777 ALU_MOV_K:
1778 DST = (u32) IMM;
1779 CONT;
1780 ALU64_MOV_X:
1781 switch (OFF) {
1782 case 0:
1783 DST = SRC;
1784 break;
1785 case 8:
1786 DST = (s8) SRC;
1787 break;
1788 case 16:
1789 DST = (s16) SRC;
1790 break;
1791 case 32:
1792 DST = (s32) SRC;
1793 break;
1794 }
1795 CONT;
1796 ALU64_MOV_K:
1797 DST = IMM;
1798 CONT;
1799 LD_IMM_DW:
1800 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1801 insn++;
1802 CONT;
1803 ALU_ARSH_X:
1804 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1805 CONT;
1806 ALU_ARSH_K:
1807 DST = (u64) (u32) (((s32) DST) >> IMM);
1808 CONT;
1809 ALU64_ARSH_X:
1810 (*(s64 *) &DST) >>= (SRC & 63);
1811 CONT;
1812 ALU64_ARSH_K:
1813 (*(s64 *) &DST) >>= IMM;
1814 CONT;
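/* Division and modulo: insn->off selects unsigned (0) or signed (1)
 * semantics. The 32-bit signed variants below compute on absolute
 * values via do_div() and fix up the sign of the result afterwards.
 */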
1815 ALU64_MOD_X:
1816 switch (OFF) {
1817 case 0:
1818 div64_u64_rem(DST, SRC, &AX);
1819 DST = AX;
1820 break;
1821 case 1:
1822 AX = div64_s64(DST, SRC);
1823 DST = DST - AX * SRC;
1824 break;
1825 }
1826 CONT;
1827 ALU_MOD_X:
1828 switch (OFF) {
1829 case 0:
1830 AX = (u32) DST;
1831 DST = do_div(AX, (u32) SRC);
1832 break;
1833 case 1:
1834 AX = abs((s32)DST);
1835 AX = do_div(AX, abs((s32)SRC));
1836 if ((s32)DST < 0)
1837 DST = (u32)-AX;
1838 else
1839 DST = (u32)AX;
1840 break;
1841 }
1842 CONT;
1843 ALU64_MOD_K:
1844 switch (OFF) {
1845 case 0:
1846 div64_u64_rem(DST, IMM, &AX);
1847 DST = AX;
1848 break;
1849 case 1:
1850 AX = div64_s64(DST, IMM);
1851 DST = DST - AX * IMM;
1852 break;
1853 }
1854 CONT;
1855 ALU_MOD_K:
1856 switch (OFF) {
1857 case 0:
1858 AX = (u32) DST;
1859 DST = do_div(AX, (u32) IMM);
1860 break;
1861 case 1:
1862 AX = abs((s32)DST);
1863 AX = do_div(AX, abs((s32)IMM));
1864 if ((s32)DST < 0)
1865 DST = (u32)-AX;
1866 else
1867 DST = (u32)AX;
1868 break;
1869 }
1870 CONT;
1871 ALU64_DIV_X:
1872 switch (OFF) {
1873 case 0:
1874 DST = div64_u64(DST, SRC);
1875 break;
1876 case 1:
1877 DST = div64_s64(DST, SRC);
1878 break;
1879 }
1880 CONT;
1881 ALU_DIV_X:
1882 switch (OFF) {
1883 case 0:
1884 AX = (u32) DST;
1885 do_div(AX, (u32) SRC);
1886 DST = (u32) AX;
1887 break;
1888 case 1:
1889 AX = abs((s32)DST);
1890 do_div(AX, abs((s32)SRC));
1891 if (((s32)DST < 0) == ((s32)SRC < 0))
1892 DST = (u32)AX;
1893 else
1894 DST = (u32)-AX;
1895 break;
1896 }
1897 CONT;
1898 ALU64_DIV_K:
1899 switch (OFF) {
1900 case 0:
1901 DST = div64_u64(DST, IMM);
1902 break;
1903 case 1:
1904 DST = div64_s64(DST, IMM);
1905 break;
1906 }
1907 CONT;
1908 ALU_DIV_K:
1909 switch (OFF) {
1910 case 0:
1911 AX = (u32) DST;
1912 do_div(AX, (u32) IMM);
1913 DST = (u32) AX;
1914 break;
1915 case 1:
1916 AX = abs((s32)DST);
1917 do_div(AX, abs((s32)IMM));
1918 if (((s32)DST < 0) == ((s32)IMM < 0))
1919 DST = (u32)AX;
1920 else
1921 DST = (u32)-AX;
1922 break;
1923 }
1924 CONT;
1925 ALU_END_TO_BE:
1926 switch (IMM) {
1927 case 16:
1928 DST = (__force u16) cpu_to_be16(DST);
1929 break;
1930 case 32:
1931 DST = (__force u32) cpu_to_be32(DST);
1932 break;
1933 case 64:
1934 DST = (__force u64) cpu_to_be64(DST);
1935 break;
1936 }
1937 CONT;
1938 ALU_END_TO_LE:
1939 switch (IMM) {
1940 case 16:
1941 DST = (__force u16) cpu_to_le16(DST);
1942 break;
1943 case 32:
1944 DST = (__force u32) cpu_to_le32(DST);
1945 break;
1946 case 64:
1947 DST = (__force u64) cpu_to_le64(DST);
1948 break;
1949 }
1950 CONT;
1951 ALU64_END_TO_LE:
1952 switch (IMM) {
1953 case 16:
1954 DST = (__force u16) __swab16(DST);
1955 break;
1956 case 32:
1957 DST = (__force u32) __swab32(DST);
1958 break;
1959 case 64:
1960 DST = (__force u64) __swab64(DST);
1961 break;
1962 }
1963 CONT;
1964
1965 /* CALL */
1966 JMP_CALL:
1967 /* Function call scratches BPF_R1-BPF_R5 registers,
1968 * preserves BPF_R6-BPF_R9, and stores return value
1969 * into BPF_R0.
1970 */
1971 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1972 BPF_R4, BPF_R5);
1973 CONT;
1974
1975 JMP_CALL_ARGS:
1976 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1977 BPF_R3, BPF_R4,
1978 BPF_R5,
1979 insn + insn->off + 1);
1980 CONT;
1981
1982 JMP_TAIL_CALL: {
1983 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1984 struct bpf_array *array = container_of(map, struct bpf_array, map);
1985 struct bpf_prog *prog;
1986 u32 index = BPF_R3;
1987
1988 if (unlikely(index >= array->map.max_entries))
1989 goto out;
1990
1991 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1992 goto out;
1993
1994 tail_call_cnt++;
1995
1996 prog = READ_ONCE(array->ptrs[index]);
1997 if (!prog)
1998 goto out;
1999
2000 /* ARG1 at this point is guaranteed to point to CTX from
2001 * the verifier side due to the fact that the tail call is
2002 * handled like a helper, that is, bpf_tail_call_proto,
2003 * where arg1_type is ARG_PTR_TO_CTX.
2004 */
2005 insn = prog->insnsi;
2006 goto select_insn;
2007 out:
2008 CONT;
2009 }
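/* Unconditional jumps: JMP_JA takes its displacement from the 16-bit
 * off field, while the JMP32 variant (gotol) uses the 32-bit imm field
 * to reach targets beyond the +/-32k instruction range.
 */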
2010 JMP_JA:
2011 insn += insn->off;
2012 CONT;
2013 JMP32_JA:
2014 insn += insn->imm;
2015 CONT;
2016 JMP_EXIT:
2017 return BPF_R0;
2018 /* JMP */
2019 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2020 JMP_##OPCODE##_X: \
2021 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2022 insn += insn->off; \
2023 CONT_JMP; \
2024 } \
2025 CONT; \
2026 JMP32_##OPCODE##_X: \
2027 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2028 insn += insn->off; \
2029 CONT_JMP; \
2030 } \
2031 CONT; \
2032 JMP_##OPCODE##_K: \
2033 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2034 insn += insn->off; \
2035 CONT_JMP; \
2036 } \
2037 CONT; \
2038 JMP32_##OPCODE##_K: \
2039 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2040 insn += insn->off; \
2041 CONT_JMP; \
2042 } \
2043 CONT;
2044 COND_JMP(u, JEQ, ==)
2045 COND_JMP(u, JNE, !=)
2046 COND_JMP(u, JGT, >)
2047 COND_JMP(u, JLT, <)
2048 COND_JMP(u, JGE, >=)
2049 COND_JMP(u, JLE, <=)
2050 COND_JMP(u, JSET, &)
2051 COND_JMP(s, JSGT, >)
2052 COND_JMP(s, JSLT, <)
2053 COND_JMP(s, JSGE, >=)
2054 COND_JMP(s, JSLE, <=)
2055 #undef COND_JMP
2056 /* ST, STX and LDX */
2057 ST_NOSPEC:
2058 /* Speculation barrier for mitigating Speculative Store Bypass.
2059 * In case of arm64, we rely on the firmware mitigation as
2060 * controlled via the ssbd kernel parameter. Whenever the
2061 * mitigation is enabled, it works for all of the kernel code
2062 * with no need to provide any additional instructions here.
2063 * In case of x86, we use 'lfence' insn for mitigation. We
2064 * reuse preexisting logic from Spectre v1 mitigation that
2065 * happens to produce the required code on x86 for v4 as well.
2066 */
2067 barrier_nospec();
2068 CONT;
2069 #define LDST(SIZEOP, SIZE) \
2070 STX_MEM_##SIZEOP: \
2071 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2072 CONT; \
2073 ST_MEM_##SIZEOP: \
2074 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2075 CONT; \
2076 LDX_MEM_##SIZEOP: \
2077 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2078 CONT; \
2079 LDX_PROBE_MEM_##SIZEOP: \
2080 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2081 (const void *)(long) (SRC + insn->off)); \
2082 DST = *((SIZE *)&DST); \
2083 CONT;
2084
2085 LDST(B, u8)
2086 LDST(H, u16)
2087 LDST(W, u32)
2088 LDST(DW, u64)
2089 #undef LDST
2090
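/* Sign-extending loads (BPF_MEMSX): the value is read at its natural
 * width and then sign-extended into the 64-bit destination register
 * via the s8/s16/s32 casts below.
 */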
2091 #define LDSX(SIZEOP, SIZE) \
2092 LDX_MEMSX_##SIZEOP: \
2093 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2094 CONT; \
2095 LDX_PROBE_MEMSX_##SIZEOP: \
2096 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2097 (const void *)(long) (SRC + insn->off)); \
2098 DST = *((SIZE *)&DST); \
2099 CONT;
2100
2101 LDSX(B, s8)
2102 LDSX(H, s16)
2103 LDSX(W, s32)
2104 #undef LDSX
2105
2106 #define ATOMIC_ALU_OP(BOP, KOP) \
2107 case BOP: \
2108 if (BPF_SIZE(insn->code) == BPF_W) \
2109 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2110 (DST + insn->off)); \
2111 else \
2112 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2113 (DST + insn->off)); \
2114 break; \
2115 case BOP | BPF_FETCH: \
2116 if (BPF_SIZE(insn->code) == BPF_W) \
2117 SRC = (u32) atomic_fetch_##KOP( \
2118 (u32) SRC, \
2119 (atomic_t *)(unsigned long) (DST + insn->off)); \
2120 else \
2121 SRC = (u64) atomic64_fetch_##KOP( \
2122 (u64) SRC, \
2123 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2124 break;
2125
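/* Atomic read-modify-write (BPF_STX | BPF_ATOMIC): insn->imm selects
 * the operation. The plain ALU forms discard the old memory value, the
 * BPF_FETCH forms return it in the source register, BPF_XCHG returns
 * the old value in the source register and BPF_CMPXCHG in R0.
 */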
2126 STX_ATOMIC_DW:
2127 STX_ATOMIC_W:
2128 switch (IMM) {
2129 ATOMIC_ALU_OP(BPF_ADD, add)
2130 ATOMIC_ALU_OP(BPF_AND, and)
2131 ATOMIC_ALU_OP(BPF_OR, or)
2132 ATOMIC_ALU_OP(BPF_XOR, xor)
2133 #undef ATOMIC_ALU_OP
2134
2135 case BPF_XCHG:
2136 if (BPF_SIZE(insn->code) == BPF_W)
2137 SRC = (u32) atomic_xchg(
2138 (atomic_t *)(unsigned long) (DST + insn->off),
2139 (u32) SRC);
2140 else
2141 SRC = (u64) atomic64_xchg(
2142 (atomic64_t *)(unsigned long) (DST + insn->off),
2143 (u64) SRC);
2144 break;
2145 case BPF_CMPXCHG:
2146 if (BPF_SIZE(insn->code) == BPF_W)
2147 BPF_R0 = (u32) atomic_cmpxchg(
2148 (atomic_t *)(unsigned long) (DST + insn->off),
2149 (u32) BPF_R0, (u32) SRC);
2150 else
2151 BPF_R0 = (u64) atomic64_cmpxchg(
2152 (atomic64_t *)(unsigned long) (DST + insn->off),
2153 (u64) BPF_R0, (u64) SRC);
2154 break;
2155
2156 default:
2157 goto default_label;
2158 }
2159 CONT;
2160
2161 default_label:
2162 /* If we ever reach this, we have a bug somewhere. Die hard here
2163 * instead of just returning 0; we could be somewhere in a subprog,
2164 * so execution could otherwise continue, which we do /not/ want.
2165 *
2166 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2167 */
2168 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2169 insn->code, insn->imm);
2170 BUG_ON(1);
2171 return 0;
2172 }
2173
2174 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2175 #define DEFINE_BPF_PROG_RUN(stack_size) \
2176 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2177 { \
2178 u64 stack[stack_size / sizeof(u64)]; \
2179 u64 regs[MAX_BPF_EXT_REG] = {}; \
2180 \
2181 kmsan_unpoison_memory(stack, sizeof(stack)); \
2182 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2183 ARG1 = (u64) (unsigned long) ctx; \
2184 return ___bpf_prog_run(regs, insn); \
2185 }
2186
2187 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2188 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2189 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2190 const struct bpf_insn *insn) \
2191 { \
2192 u64 stack[stack_size / sizeof(u64)]; \
2193 u64 regs[MAX_BPF_EXT_REG]; \
2194 \
2195 kmsan_unpoison_memory(stack, sizeof(stack)); \
2196 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2197 BPF_R1 = r1; \
2198 BPF_R2 = r2; \
2199 BPF_R3 = r3; \
2200 BPF_R4 = r4; \
2201 BPF_R5 = r5; \
2202 return ___bpf_prog_run(regs, insn); \
2203 }
2204
2205 #define EVAL1(FN, X) FN(X)
2206 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2207 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2208 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2209 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2210 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2211
2212 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2213 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2214 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2215
2216 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2217 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2218 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2219
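/* One interpreter entry point exists per 32-byte stack-size step from
 * 32 up to 512 bytes. For example (illustrative), a program with
 * stack_depth == 100 rounds up to 128 and is dispatched to
 * __bpf_prog_run128(), i.e. index (128 / 32) - 1 == 3 in the tables
 * below.
 */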
2220 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2221
2222 static unsigned int (*interpreters[])(const void *ctx,
2223 const struct bpf_insn *insn) = {
2224 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2225 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2226 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2227 };
2228 #undef PROG_NAME_LIST
2229 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2230 static __maybe_unused
2231 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2232 const struct bpf_insn *insn) = {
2233 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2234 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2235 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2236 };
2237 #undef PROG_NAME_LIST
2238
2239 #ifdef CONFIG_BPF_SYSCALL
2240 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2241 {
2242 stack_depth = max_t(u32, stack_depth, 1);
2243 insn->off = (s16) insn->imm;
2244 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2245 __bpf_call_base_args;
2246 insn->code = BPF_JMP | BPF_CALL_ARGS;
2247 }
2248 #endif
2249 #else
2250 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2251 const struct bpf_insn *insn)
2252 {
2253 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2254 * is not working properly, so warn about it!
2255 */
2256 WARN_ON_ONCE(1);
2257 return 0;
2258 }
2259 #endif
2260
2261 bool bpf_prog_map_compatible(struct bpf_map *map,
2262 const struct bpf_prog *fp)
2263 {
2264 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2265 bool ret;
2266 struct bpf_prog_aux *aux = fp->aux;
2267
2268 if (fp->kprobe_override)
2269 return false;
2270
2271 /* XDP programs inserted into maps are not guaranteed to run on
2272 * a particular netdev (and can run outside driver context entirely
2273 * in the case of devmap and cpumap). Until device checks
2274 * are implemented, prohibit adding dev-bound programs to program maps.
2275 */
2276 if (bpf_prog_is_dev_bound(aux))
2277 return false;
2278
2279 spin_lock(&map->owner.lock);
2280 if (!map->owner.type) {
2281 /* There's no owner yet where we could check for
2282 * compatibility.
2283 */
2284 map->owner.type = prog_type;
2285 map->owner.jited = fp->jited;
2286 map->owner.xdp_has_frags = aux->xdp_has_frags;
2287 map->owner.attach_func_proto = aux->attach_func_proto;
2288 ret = true;
2289 } else {
2290 ret = map->owner.type == prog_type &&
2291 map->owner.jited == fp->jited &&
2292 map->owner.xdp_has_frags == aux->xdp_has_frags;
2293 if (ret &&
2294 map->owner.attach_func_proto != aux->attach_func_proto) {
2295 switch (prog_type) {
2296 case BPF_PROG_TYPE_TRACING:
2297 case BPF_PROG_TYPE_LSM:
2298 case BPF_PROG_TYPE_EXT:
2299 case BPF_PROG_TYPE_STRUCT_OPS:
2300 ret = false;
2301 break;
2302 default:
2303 break;
2304 }
2305 }
2306 }
2307 spin_unlock(&map->owner.lock);
2308
2309 return ret;
2310 }
2311
2312 static int bpf_check_tail_call(const struct bpf_prog *fp)
2313 {
2314 struct bpf_prog_aux *aux = fp->aux;
2315 int i, ret = 0;
2316
2317 mutex_lock(&aux->used_maps_mutex);
2318 for (i = 0; i < aux->used_map_cnt; i++) {
2319 struct bpf_map *map = aux->used_maps[i];
2320
2321 if (!map_type_contains_progs(map))
2322 continue;
2323
2324 if (!bpf_prog_map_compatible(map, fp)) {
2325 ret = -EINVAL;
2326 goto out;
2327 }
2328 }
2329
2330 out:
2331 mutex_unlock(&aux->used_maps_mutex);
2332 return ret;
2333 }
2334
2335 static void bpf_prog_select_func(struct bpf_prog *fp)
2336 {
2337 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2338 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2339
2340 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2341 #else
2342 fp->bpf_func = __bpf_prog_ret0_warn;
2343 #endif
2344 }
2345
2346 /**
2347 * bpf_prog_select_runtime - select exec runtime for BPF program
2348 * @fp: bpf_prog populated with BPF program
2349 * @err: pointer to error variable
2350 *
2351 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2352 * The BPF program will be executed via bpf_prog_run() function.
2353 *
2354 * Return: the &fp argument along with &err set to 0 for success or
2355 * a negative errno code on failure
2356 */
2357 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2358 {
2359 /* In case of BPF-to-BPF calls, the verifier did all the prep
2360 * work with regard to JITing, etc.
2361 */
2362 bool jit_needed = false;
2363
2364 if (fp->bpf_func)
2365 goto finalize;
2366
2367 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2368 bpf_prog_has_kfunc_call(fp))
2369 jit_needed = true;
2370
2371 bpf_prog_select_func(fp);
2372
2373 /* eBPF JITs can rewrite the program in case constant
2374 * blinding is active. However, in case of error during
2375 * blinding, bpf_int_jit_compile() must always return a
2376 * valid program, which in this case would simply not
2377 * be JITed, but falls back to the interpreter.
2378 */
2379 if (!bpf_prog_is_offloaded(fp->aux)) {
2380 *err = bpf_prog_alloc_jited_linfo(fp);
2381 if (*err)
2382 return fp;
2383
2384 fp = bpf_int_jit_compile(fp);
2385 bpf_prog_jit_attempt_done(fp);
2386 if (!fp->jited && jit_needed) {
2387 *err = -ENOTSUPP;
2388 return fp;
2389 }
2390 } else {
2391 *err = bpf_prog_offload_compile(fp);
2392 if (*err)
2393 return fp;
2394 }
2395
2396 finalize:
2397 bpf_prog_lock_ro(fp);
2398
2399 /* The tail call compatibility check can only be done at
2400 * this late stage, as we need to determine whether we deal
2401 * with JITed or non-JITed program concatenations, and not
2402 * all eBPF JITs might immediately support all features.
2403 */
2404 *err = bpf_check_tail_call(fp);
2405
2406 return fp;
2407 }
2408 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2409
2410 static unsigned int __bpf_prog_ret1(const void *ctx,
2411 const struct bpf_insn *insn)
2412 {
2413 return 1;
2414 }
2415
2416 static struct bpf_prog_dummy {
2417 struct bpf_prog prog;
2418 } dummy_bpf_prog = {
2419 .prog = {
2420 .bpf_func = __bpf_prog_ret1,
2421 },
2422 };
2423
2424 struct bpf_empty_prog_array bpf_empty_prog_array = {
2425 .null_prog = NULL,
2426 };
2427 EXPORT_SYMBOL(bpf_empty_prog_array);
2428
2429 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2430 {
2431 if (prog_cnt)
2432 return kzalloc(sizeof(struct bpf_prog_array) +
2433 sizeof(struct bpf_prog_array_item) *
2434 (prog_cnt + 1),
2435 flags);
2436
2437 return &bpf_empty_prog_array.hdr;
2438 }
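/* Illustrative usage sketch (not taken from a real call site):
 *
 *	struct bpf_prog_array *arr = bpf_prog_array_alloc(cnt, GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	bpf_prog_array_free(arr);
 *
 * With prog_cnt == 0 the shared &bpf_empty_prog_array.hdr is handed out,
 * which bpf_prog_array_free() below recognizes and never frees.
 */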
2439
2440 void bpf_prog_array_free(struct bpf_prog_array *progs)
2441 {
2442 if (!progs || progs == &bpf_empty_prog_array.hdr)
2443 return;
2444 kfree_rcu(progs, rcu);
2445 }
2446
2447 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2448 {
2449 struct bpf_prog_array *progs;
2450
2451 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2452 * no need to call kfree_rcu(), just call kfree() directly.
2453 */
2454 progs = container_of(rcu, struct bpf_prog_array, rcu);
2455 if (rcu_trace_implies_rcu_gp())
2456 kfree(progs);
2457 else
2458 kfree_rcu(progs, rcu);
2459 }
2460
2461 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2462 {
2463 if (!progs || progs == &bpf_empty_prog_array.hdr)
2464 return;
2465 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2466 }
2467
2468 int bpf_prog_array_length(struct bpf_prog_array *array)
2469 {
2470 struct bpf_prog_array_item *item;
2471 u32 cnt = 0;
2472
2473 for (item = array->items; item->prog; item++)
2474 if (item->prog != &dummy_bpf_prog.prog)
2475 cnt++;
2476 return cnt;
2477 }
2478
2479 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2480 {
2481 struct bpf_prog_array_item *item;
2482
2483 for (item = array->items; item->prog; item++)
2484 if (item->prog != &dummy_bpf_prog.prog)
2485 return false;
2486 return true;
2487 }
2488
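/* Copy up to request_cnt program ids into prog_ids, skipping dummy
 * entries. Returns true if entries remain beyond the last one copied,
 * which callers translate into -ENOSPC.
 */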
2489 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2490 u32 *prog_ids,
2491 u32 request_cnt)
2492 {
2493 struct bpf_prog_array_item *item;
2494 int i = 0;
2495
2496 for (item = array->items; item->prog; item++) {
2497 if (item->prog == &dummy_bpf_prog.prog)
2498 continue;
2499 prog_ids[i] = item->prog->aux->id;
2500 if (++i == request_cnt) {
2501 item++;
2502 break;
2503 }
2504 }
2505
2506 return !!(item->prog);
2507 }
2508
2509 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2510 __u32 __user *prog_ids, u32 cnt)
2511 {
2512 unsigned long err = 0;
2513 bool nospc;
2514 u32 *ids;
2515
2516 /* users of this function are doing:
2517 * cnt = bpf_prog_array_length();
2518 * if (cnt > 0)
2519 * bpf_prog_array_copy_to_user(..., cnt);
2520 * so below kcalloc doesn't need extra cnt > 0 check.
2521 */
2522 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2523 if (!ids)
2524 return -ENOMEM;
2525 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2526 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2527 kfree(ids);
2528 if (err)
2529 return -EFAULT;
2530 if (nospc)
2531 return -ENOSPC;
2532 return 0;
2533 }
2534
2535 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2536 struct bpf_prog *old_prog)
2537 {
2538 struct bpf_prog_array_item *item;
2539
2540 for (item = array->items; item->prog; item++)
2541 if (item->prog == old_prog) {
2542 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2543 break;
2544 }
2545 }
2546
2547 /**
2548 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2549 * index into the program array with
2550 * a dummy no-op program.
2551 * @array: a bpf_prog_array
2552 * @index: the index of the program to replace
2553 *
2554 * Skips over dummy programs, by not counting them, when calculating
2555 * the position of the program to replace.
2556 *
2557 * Return:
2558 * * 0 - Success
2559 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2560 * * -ENOENT - Index out of range
2561 */
2562 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2563 {
2564 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2565 }
2566
2567 /**
2568 * bpf_prog_array_update_at() - Updates the program at the given index
2569 * into the program array.
2570 * @array: a bpf_prog_array
2571 * @index: the index of the program to update
2572 * @prog: the program to insert into the array
2573 *
2574 * Skips over dummy programs, by not counting them, when calculating
2575 * the position of the program to update.
2576 *
2577 * Return:
2578 * * 0 - Success
2579 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2580 * * -ENOENT - Index out of range
2581 */
2582 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2583 struct bpf_prog *prog)
2584 {
2585 struct bpf_prog_array_item *item;
2586
2587 if (unlikely(index < 0))
2588 return -EINVAL;
2589
2590 for (item = array->items; item->prog; item++) {
2591 if (item->prog == &dummy_bpf_prog.prog)
2592 continue;
2593 if (!index) {
2594 WRITE_ONCE(item->prog, prog);
2595 return 0;
2596 }
2597 index--;
2598 }
2599 return -ENOENT;
2600 }
2601
2602 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2603 struct bpf_prog *exclude_prog,
2604 struct bpf_prog *include_prog,
2605 u64 bpf_cookie,
2606 struct bpf_prog_array **new_array)
2607 {
2608 int new_prog_cnt, carry_prog_cnt = 0;
2609 struct bpf_prog_array_item *existing, *new;
2610 struct bpf_prog_array *array;
2611 bool found_exclude = false;
2612
2613 /* Figure out how many existing progs we need to carry over to
2614 * the new array.
2615 */
2616 if (old_array) {
2617 existing = old_array->items;
2618 for (; existing->prog; existing++) {
2619 if (existing->prog == exclude_prog) {
2620 found_exclude = true;
2621 continue;
2622 }
2623 if (existing->prog != &dummy_bpf_prog.prog)
2624 carry_prog_cnt++;
2625 if (existing->prog == include_prog)
2626 return -EEXIST;
2627 }
2628 }
2629
2630 if (exclude_prog && !found_exclude)
2631 return -ENOENT;
2632
2633 /* How many progs (not NULL) will be in the new array? */
2634 new_prog_cnt = carry_prog_cnt;
2635 if (include_prog)
2636 new_prog_cnt += 1;
2637
2638 /* Do we have any prog (not NULL) in the new array? */
2639 if (!new_prog_cnt) {
2640 *new_array = NULL;
2641 return 0;
2642 }
2643
2644 /* +1 as the end of prog_array is marked with NULL */
2645 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2646 if (!array)
2647 return -ENOMEM;
2648 new = array->items;
2649
2650 /* Fill in the new prog array */
2651 if (carry_prog_cnt) {
2652 existing = old_array->items;
2653 for (; existing->prog; existing++) {
2654 if (existing->prog == exclude_prog ||
2655 existing->prog == &dummy_bpf_prog.prog)
2656 continue;
2657
2658 new->prog = existing->prog;
2659 new->bpf_cookie = existing->bpf_cookie;
2660 new++;
2661 }
2662 }
2663 if (include_prog) {
2664 new->prog = include_prog;
2665 new->bpf_cookie = bpf_cookie;
2666 new++;
2667 }
2668 new->prog = NULL;
2669 *new_array = array;
2670 return 0;
2671 }
2672
2673 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2674 u32 *prog_ids, u32 request_cnt,
2675 u32 *prog_cnt)
2676 {
2677 u32 cnt = 0;
2678
2679 if (array)
2680 cnt = bpf_prog_array_length(array);
2681
2682 *prog_cnt = cnt;
2683
2684 /* return early if user requested only program count or nothing to copy */
2685 if (!request_cnt || !cnt)
2686 return 0;
2687
2688 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2689 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2690 : 0;
2691 }
2692
2693 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2694 struct bpf_map **used_maps, u32 len)
2695 {
2696 struct bpf_map *map;
2697 bool sleepable;
2698 u32 i;
2699
2700 sleepable = aux->sleepable;
2701 for (i = 0; i < len; i++) {
2702 map = used_maps[i];
2703 if (map->ops->map_poke_untrack)
2704 map->ops->map_poke_untrack(map, aux);
2705 if (sleepable)
2706 atomic64_dec(&map->sleepable_refcnt);
2707 bpf_map_put(map);
2708 }
2709 }
2710
2711 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2712 {
2713 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2714 kfree(aux->used_maps);
2715 }
2716
2717 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2718 struct btf_mod_pair *used_btfs, u32 len)
2719 {
2720 #ifdef CONFIG_BPF_SYSCALL
2721 struct btf_mod_pair *btf_mod;
2722 u32 i;
2723
2724 for (i = 0; i < len; i++) {
2725 btf_mod = &used_btfs[i];
2726 if (btf_mod->module)
2727 module_put(btf_mod->module);
2728 btf_put(btf_mod->btf);
2729 }
2730 #endif
2731 }
2732
2733 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2734 {
2735 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2736 kfree(aux->used_btfs);
2737 }
2738
2739 static void bpf_prog_free_deferred(struct work_struct *work)
2740 {
2741 struct bpf_prog_aux *aux;
2742 int i;
2743
2744 aux = container_of(work, struct bpf_prog_aux, work);
2745 #ifdef CONFIG_BPF_SYSCALL
2746 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2747 #endif
2748 #ifdef CONFIG_CGROUP_BPF
2749 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2750 bpf_cgroup_atype_put(aux->cgroup_atype);
2751 #endif
2752 bpf_free_used_maps(aux);
2753 bpf_free_used_btfs(aux);
2754 if (bpf_prog_is_dev_bound(aux))
2755 bpf_prog_dev_bound_destroy(aux->prog);
2756 #ifdef CONFIG_PERF_EVENTS
2757 if (aux->prog->has_callchain_buf)
2758 put_callchain_buffers();
2759 #endif
2760 if (aux->dst_trampoline)
2761 bpf_trampoline_put(aux->dst_trampoline);
2762 for (i = 0; i < aux->func_cnt; i++) {
2763 /* We can just unlink the subprog poke descriptor table as
2764 * it was originally linked to the main program and is also
2765 * released along with it.
2766 */
2767 aux->func[i]->aux->poke_tab = NULL;
2768 bpf_jit_free(aux->func[i]);
2769 }
2770 if (aux->func_cnt) {
2771 kfree(aux->func);
2772 bpf_prog_unlock_free(aux->prog);
2773 } else {
2774 bpf_jit_free(aux->prog);
2775 }
2776 }
2777
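/* The actual teardown above can block (e.g. when putting maps, trampolines
 * or JIT images), so bpf_prog_free() defers it to a workqueue rather than
 * running it directly in the caller's context.
 */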
2778 void bpf_prog_free(struct bpf_prog *fp)
2779 {
2780 struct bpf_prog_aux *aux = fp->aux;
2781
2782 if (aux->dst_prog)
2783 bpf_prog_put(aux->dst_prog);
2784 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2785 schedule_work(&aux->work);
2786 }
2787 EXPORT_SYMBOL_GPL(bpf_prog_free);
2788
2789 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2790 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2791
2792 void bpf_user_rnd_init_once(void)
2793 {
2794 prandom_init_once(&bpf_user_rnd_state);
2795 }
2796
2797 BPF_CALL_0(bpf_user_rnd_u32)
2798 {
2799 /* Should someone ever have the rather unwise idea to use some
2800 * of the registers passed into this function, then note that
2801 * this function is called from native eBPF and classic-to-eBPF
2802 * transformations. Register assignments from both sides are
2803 * different, f.e. classic always sets fn(ctx, A, X) here.
2804 */
2805 struct rnd_state *state;
2806 u32 res;
2807
2808 state = &get_cpu_var(bpf_user_rnd_state);
2809 res = prandom_u32_state(state);
2810 put_cpu_var(bpf_user_rnd_state);
2811
2812 return res;
2813 }
2814
2815 BPF_CALL_0(bpf_get_raw_cpu_id)
2816 {
2817 return raw_smp_processor_id();
2818 }
2819
2820 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2821 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2822 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2823 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2824 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2825 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2826 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2827 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2828 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2829 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2830 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2831
2832 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2833 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2834 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2835 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2836 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2837 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2838 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2839
2840 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2841 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2842 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2843 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2844 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2845 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2846 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2847 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2848 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2849 const struct bpf_func_proto bpf_set_retval_proto __weak;
2850 const struct bpf_func_proto bpf_get_retval_proto __weak;
2851
2852 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2853 {
2854 return NULL;
2855 }
2856
2857 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2858 {
2859 return NULL;
2860 }
2861
2862 u64 __weak
2863 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2864 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2865 {
2866 return -ENOTSUPP;
2867 }
2868 EXPORT_SYMBOL_GPL(bpf_event_output);
2869
2870 /* Always built-in helper functions. */
2871 const struct bpf_func_proto bpf_tail_call_proto = {
2872 .func = NULL,
2873 .gpl_only = false,
2874 .ret_type = RET_VOID,
2875 .arg1_type = ARG_PTR_TO_CTX,
2876 .arg2_type = ARG_CONST_MAP_PTR,
2877 .arg3_type = ARG_ANYTHING,
2878 };
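/* Note that .func above is intentionally NULL: tail calls are never
 * dispatched as an ordinary helper call. The verifier and the JITs (or
 * the JMP_TAIL_CALL handler in the interpreter above) implement them
 * directly.
 */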
2879
2880 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2881 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2882 * eBPF and implicitly also cBPF can get JITed!
2883 */
2884 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2885 {
2886 return prog;
2887 }
2888
2889 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2890 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2891 */
2892 void __weak bpf_jit_compile(struct bpf_prog *prog)
2893 {
2894 }
2895
2896 bool __weak bpf_helper_changes_pkt_data(void *func)
2897 {
2898 return false;
2899 }
2900
2901 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2902 * analysis code and wants explicit zero extension inserted by verifier.
2903 * Otherwise, return FALSE.
2904 *
2905 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2906 * you don't override this. JITs that don't want these extra insns can detect
2907 * them using insn_is_zext.
2908 */
2909 bool __weak bpf_jit_needs_zext(void)
2910 {
2911 return false;
2912 }
2913
2914 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2915 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2916 {
2917 return false;
2918 }
2919
2920 bool __weak bpf_jit_supports_kfunc_call(void)
2921 {
2922 return false;
2923 }
2924
2925 bool __weak bpf_jit_supports_far_kfunc_call(void)
2926 {
2927 return false;
2928 }
2929
2930 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2931 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2932 */
2933 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2934 int len)
2935 {
2936 return -EFAULT;
2937 }
2938
2939 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2940 void *addr1, void *addr2)
2941 {
2942 return -ENOTSUPP;
2943 }
2944
2945 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2946 {
2947 return ERR_PTR(-ENOTSUPP);
2948 }
2949
2950 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2951 {
2952 return -ENOTSUPP;
2953 }
2954
2955 #ifdef CONFIG_BPF_SYSCALL
2956 static int __init bpf_global_ma_init(void)
2957 {
2958 int ret;
2959
2960 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2961 bpf_global_ma_set = !ret;
2962 return ret;
2963 }
2964 late_initcall(bpf_global_ma_init);
2965 #endif
2966
2967 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2968 EXPORT_SYMBOL(bpf_stats_enabled_key);
2969
2970 /* All definitions of tracepoints related to BPF. */
2971 #define CREATE_TRACE_POINTS
2972 #include <linux/bpf_trace.h>
2973
2974 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2975 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2976