verifier.c (30aa69e7bd9f7af3574120249eecb3726dcaf737) verifier.c (8bad74f9840f87661f20ced3dc80c84ab4fd55a1)
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 247 unchanged lines hidden (view full) ---

256 [PTR_TO_CTX] = "ctx",
257 [CONST_PTR_TO_MAP] = "map_ptr",
258 [PTR_TO_MAP_VALUE] = "map_value",
259 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
260 [PTR_TO_STACK] = "fp",
261 [PTR_TO_PACKET] = "pkt",
262 [PTR_TO_PACKET_META] = "pkt_meta",
263 [PTR_TO_PACKET_END] = "pkt_end",
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 247 unchanged lines hidden (view full) ---

256 [PTR_TO_CTX] = "ctx",
257 [CONST_PTR_TO_MAP] = "map_ptr",
258 [PTR_TO_MAP_VALUE] = "map_value",
259 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
260 [PTR_TO_STACK] = "fp",
261 [PTR_TO_PACKET] = "pkt",
262 [PTR_TO_PACKET_META] = "pkt_meta",
263 [PTR_TO_PACKET_END] = "pkt_end",
264 [PTR_TO_FLOW_KEYS] = "flow_keys",
264};
265
265};
266
267static char slot_type_char[] = {
268 [STACK_INVALID] = '?',
269 [STACK_SPILL] = 'r',
270 [STACK_MISC] = 'm',
271 [STACK_ZERO] = '0',
272};
273
266static void print_liveness(struct bpf_verifier_env *env,
267 enum bpf_reg_liveness live)
268{
269 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
270 verbose(env, "_");
271 if (live & REG_LIVE_READ)
272 verbose(env, "r");
273 if (live & REG_LIVE_WRITTEN)

--- 70 unchanged lines hidden (view full) ---

344 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
345 verbose(env, ",var_off=%s", tn_buf);
346 }
347 }
348 verbose(env, ")");
349 }
350 }
351 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
274static void print_liveness(struct bpf_verifier_env *env,
275 enum bpf_reg_liveness live)
276{
277 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
278 verbose(env, "_");
279 if (live & REG_LIVE_READ)
280 verbose(env, "r");
281 if (live & REG_LIVE_WRITTEN)

--- 70 unchanged lines hidden (view full) ---

352 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
353 verbose(env, ",var_off=%s", tn_buf);
354 }
355 }
356 verbose(env, ")");
357 }
358 }
359 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
352 if (state->stack[i].slot_type[0] == STACK_SPILL) {
353 verbose(env, " fp%d",
354 (-i - 1) * BPF_REG_SIZE);
355 print_liveness(env, state->stack[i].spilled_ptr.live);
360 char types_buf[BPF_REG_SIZE + 1];
361 bool valid = false;
362 int j;
363
364 for (j = 0; j < BPF_REG_SIZE; j++) {
365 if (state->stack[i].slot_type[j] != STACK_INVALID)
366 valid = true;
367 types_buf[j] = slot_type_char[
368 state->stack[i].slot_type[j]];
369 }
370 types_buf[BPF_REG_SIZE] = 0;
371 if (!valid)
372 continue;
373 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
374 print_liveness(env, state->stack[i].spilled_ptr.live);
375 if (state->stack[i].slot_type[0] == STACK_SPILL)
356 verbose(env, "=%s",
357 reg_type_str[state->stack[i].spilled_ptr.type]);
376 verbose(env, "=%s",
377 reg_type_str[state->stack[i].spilled_ptr.type]);
358 }
359 if (state->stack[i].slot_type[0] == STACK_ZERO)
360 verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
378 else
379 verbose(env, "=%s", types_buf);
361 }
362 verbose(env, "\n");
363}
364
365static int copy_stack_state(struct bpf_func_state *dst,
366 const struct bpf_func_state *src)
367{
368 if (!src->stack)

--- 6 unchanged lines hidden (view full) ---

375 memcpy(dst->stack, src->stack,
376 sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
377 return 0;
378}
379
380/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
381 * make it consume minimal amount of memory. check_stack_write() access from
382 * the program calls into realloc_func_state() to grow the stack size.
380 }
381 verbose(env, "\n");
382}
383
384static int copy_stack_state(struct bpf_func_state *dst,
385 const struct bpf_func_state *src)
386{
387 if (!src->stack)

--- 6 unchanged lines hidden (view full) ---

394 memcpy(dst->stack, src->stack,
395 sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
396 return 0;
397}
398
399/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
400 * make it consume minimal amount of memory. check_stack_write() access from
401 * the program calls into realloc_func_state() to grow the stack size.
383 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
384 * which this function copies over. It points to previous bpf_verifier_state
385 * which is never reallocated
402 * Note there is a non-zero parent pointer inside each reg of bpf_verifier_state
403 * which this function copies over. It points to corresponding reg in previous
404 * bpf_verifier_state which is never reallocated
386 */
387static int realloc_func_state(struct bpf_func_state *state, int size,
388 bool copy_old)
389{
390 u32 old_size = state->allocated_stack;
391 struct bpf_stack_state *new_stack;
392 int slot = size / BPF_REG_SIZE;
393

--- 67 unchanged lines hidden (view full) ---

461 int i, err;
462
463 /* if dst has more stack frames then src frame, free them */
464 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
465 free_func_state(dst_state->frame[i]);
466 dst_state->frame[i] = NULL;
467 }
468 dst_state->curframe = src->curframe;
405 */
406static int realloc_func_state(struct bpf_func_state *state, int size,
407 bool copy_old)
408{
409 u32 old_size = state->allocated_stack;
410 struct bpf_stack_state *new_stack;
411 int slot = size / BPF_REG_SIZE;
412

--- 67 unchanged lines hidden (view full) ---

480 int i, err;
481
482 /* if dst has more stack frames then src frame, free them */
483 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
484 free_func_state(dst_state->frame[i]);
485 dst_state->frame[i] = NULL;
486 }
487 dst_state->curframe = src->curframe;
469 dst_state->parent = src->parent;
470 for (i = 0; i <= src->curframe; i++) {
471 dst = dst_state->frame[i];
472 if (!dst) {
473 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
474 if (!dst)
475 return -ENOMEM;
476 dst_state->frame[i] = dst;
477 }

--- 70 unchanged lines hidden (view full) ---

548
549static void __mark_reg_not_init(struct bpf_reg_state *reg);
550
551/* Mark the unknown part of a register (variable offset or scalar value) as
552 * known to have the value @imm.
553 */
554static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
555{
488 for (i = 0; i <= src->curframe; i++) {
489 dst = dst_state->frame[i];
490 if (!dst) {
491 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
492 if (!dst)
493 return -ENOMEM;
494 dst_state->frame[i] = dst;
495 }

--- 70 unchanged lines hidden (view full) ---

566
567static void __mark_reg_not_init(struct bpf_reg_state *reg);
568
569/* Mark the unknown part of a register (variable offset or scalar value) as
570 * known to have the value @imm.
571 */
572static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
573{
556 reg->id = 0;
574 /* Clear id, off, and union(map_ptr, range) */
575 memset(((u8 *)reg) + sizeof(reg->type), 0,
576 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
557 reg->var_off = tnum_const(imm);
558 reg->smin_value = (s64)imm;
559 reg->smax_value = (s64)imm;
560 reg->umin_value = imm;
561 reg->umax_value = imm;
562}
563
/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	/* A known value of 0: clears id/off/union and pins all bounds to 0. */
	__mark_reg_known(reg, 0);
}
571
572static void __mark_reg_const_zero(struct bpf_reg_state *reg)
573{
574 __mark_reg_known(reg, 0);
577 reg->var_off = tnum_const(imm);
578 reg->smin_value = (s64)imm;
579 reg->smax_value = (s64)imm;
580 reg->umin_value = imm;
581 reg->umax_value = imm;
582}
583
/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	/* Thin wrapper: a known constant value of zero. */
	__mark_reg_known(reg, 0);
}
591
592static void __mark_reg_const_zero(struct bpf_reg_state *reg)
593{
594 __mark_reg_known(reg, 0);
575 reg->off = 0;
576 reg->type = SCALAR_VALUE;
577}
578
579static void mark_reg_known_zero(struct bpf_verifier_env *env,
580 struct bpf_reg_state *regs, u32 regno)
581{
582 if (WARN_ON(regno >= MAX_BPF_REG)) {
583 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);

--- 94 unchanged lines hidden (view full) ---

678 reg->smax_value = S64_MAX;
679 reg->umin_value = 0;
680 reg->umax_value = U64_MAX;
681}
682
683/* Mark a register as having a completely unknown (scalar) value. */
684static void __mark_reg_unknown(struct bpf_reg_state *reg)
685{
595 reg->type = SCALAR_VALUE;
596}
597
598static void mark_reg_known_zero(struct bpf_verifier_env *env,
599 struct bpf_reg_state *regs, u32 regno)
600{
601 if (WARN_ON(regno >= MAX_BPF_REG)) {
602 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);

--- 94 unchanged lines hidden (view full) ---

697 reg->smax_value = S64_MAX;
698 reg->umin_value = 0;
699 reg->umax_value = U64_MAX;
700}
701
702/* Mark a register as having a completely unknown (scalar) value. */
703static void __mark_reg_unknown(struct bpf_reg_state *reg)
704{
705 /*
706 * Clear type, id, off, and union(map_ptr, range) and
707 * padding between 'type' and union
708 */
709 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
686 reg->type = SCALAR_VALUE;
710 reg->type = SCALAR_VALUE;
687 reg->id = 0;
688 reg->off = 0;
689 reg->var_off = tnum_unknown;
690 reg->frameno = 0;
691 __mark_reg_unbounded(reg);
692}
693
694static void mark_reg_unknown(struct bpf_verifier_env *env,
695 struct bpf_reg_state *regs, u32 regno)
696{

--- 30 unchanged lines hidden (view full) ---

727 struct bpf_func_state *state)
728{
729 struct bpf_reg_state *regs = state->regs;
730 int i;
731
732 for (i = 0; i < MAX_BPF_REG; i++) {
733 mark_reg_not_init(env, regs, i);
734 regs[i].live = REG_LIVE_NONE;
711 reg->var_off = tnum_unknown;
712 reg->frameno = 0;
713 __mark_reg_unbounded(reg);
714}
715
716static void mark_reg_unknown(struct bpf_verifier_env *env,
717 struct bpf_reg_state *regs, u32 regno)
718{

--- 30 unchanged lines hidden (view full) ---

749 struct bpf_func_state *state)
750{
751 struct bpf_reg_state *regs = state->regs;
752 int i;
753
754 for (i = 0; i < MAX_BPF_REG; i++) {
755 mark_reg_not_init(env, regs, i);
756 regs[i].live = REG_LIVE_NONE;
757 regs[i].parent = NULL;
735 }
736
737 /* frame pointer */
738 regs[BPF_REG_FP].type = PTR_TO_STACK;
739 mark_reg_known_zero(env, regs, BPF_REG_FP);
740 regs[BPF_REG_FP].frameno = state->frameno;
741
742 /* 1st arg to a function */

--- 128 unchanged lines hidden (view full) ---

871 cur_subprog++;
872 if (cur_subprog < env->subprog_cnt)
873 subprog_end = subprog[cur_subprog + 1].start;
874 }
875 }
876 return 0;
877}
878
758 }
759
760 /* frame pointer */
761 regs[BPF_REG_FP].type = PTR_TO_STACK;
762 mark_reg_known_zero(env, regs, BPF_REG_FP);
763 regs[BPF_REG_FP].frameno = state->frameno;
764
765 /* 1st arg to a function */

--- 128 unchanged lines hidden (view full) ---

894 cur_subprog++;
895 if (cur_subprog < env->subprog_cnt)
896 subprog_end = subprog[cur_subprog + 1].start;
897 }
898 }
899 return 0;
900}
901
879static
880struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
881 const struct bpf_verifier_state *state,
882 struct bpf_verifier_state *parent,
883 u32 regno)
884{
885 struct bpf_verifier_state *tmp = NULL;
886
887 /* 'parent' could be a state of caller and
888 * 'state' could be a state of callee. In such case
889 * parent->curframe < state->curframe
890 * and it's ok for r1 - r5 registers
891 *
892 * 'parent' could be a callee's state after it bpf_exit-ed.
893 * In such case parent->curframe > state->curframe
894 * and it's ok for r0 only
895 */
896 if (parent->curframe == state->curframe ||
897 (parent->curframe < state->curframe &&
898 regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
899 (parent->curframe > state->curframe &&
900 regno == BPF_REG_0))
901 return parent;
902
903 if (parent->curframe > state->curframe &&
904 regno >= BPF_REG_6) {
905 /* for callee saved regs we have to skip the whole chain
906 * of states that belong to callee and mark as LIVE_READ
907 * the registers before the call
908 */
909 tmp = parent;
910 while (tmp && tmp->curframe != state->curframe) {
911 tmp = tmp->parent;
912 }
913 if (!tmp)
914 goto bug;
915 parent = tmp;
916 } else {
917 goto bug;
918 }
919 return parent;
920bug:
921 verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
922 verbose(env, "regno %d parent frame %d current frame %d\n",
923 regno, parent->curframe, state->curframe);
924 return NULL;
925}
926
902/* Parentage chain of this register (or stack slot) should take care of all
903 * issues like callee-saved registers, stack slot allocation time, etc.
904 */
927static int mark_reg_read(struct bpf_verifier_env *env,
905static int mark_reg_read(struct bpf_verifier_env *env,
928 const struct bpf_verifier_state *state,
929 struct bpf_verifier_state *parent,
930 u32 regno)
906 const struct bpf_reg_state *state,
907 struct bpf_reg_state *parent)
931{
932 bool writes = parent == state->parent; /* Observe write marks */
933
908{
909 bool writes = parent == state->parent; /* Observe write marks */
910
934 if (regno == BPF_REG_FP)
935 /* We don't need to worry about FP liveness because it's read-only */
936 return 0;
937
938 while (parent) {
939 /* if read wasn't screened by an earlier write ... */
911 while (parent) {
912 /* if read wasn't screened by an earlier write ... */
940 if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
913 if (writes && state->live & REG_LIVE_WRITTEN)
941 break;
914 break;
942 parent = skip_callee(env, state, parent, regno);
943 if (!parent)
944 return -EFAULT;
945 /* ... then we depend on parent's value */
915 /* ... then we depend on parent's value */
946 parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
916 parent->live |= REG_LIVE_READ;
947 state = parent;
948 parent = state->parent;
949 writes = true;
950 }
951 return 0;
952}
953
954static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,

--- 9 unchanged lines hidden (view full) ---

964 }
965
966 if (t == SRC_OP) {
967 /* check whether register used as source operand can be read */
968 if (regs[regno].type == NOT_INIT) {
969 verbose(env, "R%d !read_ok\n", regno);
970 return -EACCES;
971 }
917 state = parent;
918 parent = state->parent;
919 writes = true;
920 }
921 return 0;
922}
923
924static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,

--- 9 unchanged lines hidden (view full) ---

934 }
935
936 if (t == SRC_OP) {
937 /* check whether register used as source operand can be read */
938 if (regs[regno].type == NOT_INIT) {
939 verbose(env, "R%d !read_ok\n", regno);
940 return -EACCES;
941 }
972 return mark_reg_read(env, vstate, vstate->parent, regno);
942 /* We don't need to worry about FP liveness because it's read-only */
943 if (regno != BPF_REG_FP)
944 return mark_reg_read(env, &regs[regno],
945 regs[regno].parent);
973 } else {
974 /* check whether register used as dest operand can be written to */
975 if (regno == BPF_REG_FP) {
976 verbose(env, "frame pointer is read only\n");
977 return -EACCES;
978 }
979 regs[regno].live |= REG_LIVE_WRITTEN;
980 if (t == DST_OP)

--- 7 unchanged lines hidden (view full) ---

988 switch (type) {
989 case PTR_TO_MAP_VALUE:
990 case PTR_TO_MAP_VALUE_OR_NULL:
991 case PTR_TO_STACK:
992 case PTR_TO_CTX:
993 case PTR_TO_PACKET:
994 case PTR_TO_PACKET_META:
995 case PTR_TO_PACKET_END:
946 } else {
947 /* check whether register used as dest operand can be written to */
948 if (regno == BPF_REG_FP) {
949 verbose(env, "frame pointer is read only\n");
950 return -EACCES;
951 }
952 regs[regno].live |= REG_LIVE_WRITTEN;
953 if (t == DST_OP)

--- 7 unchanged lines hidden (view full) ---

961 switch (type) {
962 case PTR_TO_MAP_VALUE:
963 case PTR_TO_MAP_VALUE_OR_NULL:
964 case PTR_TO_STACK:
965 case PTR_TO_CTX:
966 case PTR_TO_PACKET:
967 case PTR_TO_PACKET_META:
968 case PTR_TO_PACKET_END:
969 case PTR_TO_FLOW_KEYS:
996 case CONST_PTR_TO_MAP:
997 return true;
998 default:
999 return false;
1000 }
1001}
1002
1003/* Does this register contain a constant zero? */

--- 71 unchanged lines hidden (view full) ---

1075 }
1076 *poff = soff;
1077 }
1078 state->stack[spi].slot_type[i] = STACK_SPILL;
1079 }
1080 } else {
1081 u8 type = STACK_MISC;
1082
970 case CONST_PTR_TO_MAP:
971 return true;
972 default:
973 return false;
974 }
975}
976
977/* Does this register contain a constant zero? */

--- 71 unchanged lines hidden (view full) ---

1049 }
1050 *poff = soff;
1051 }
1052 state->stack[spi].slot_type[i] = STACK_SPILL;
1053 }
1054 } else {
1055 u8 type = STACK_MISC;
1056
1083 /* regular write of data into stack */
1084 state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
1057 /* regular write of data into stack destroys any spilled ptr */
1058 state->stack[spi].spilled_ptr.type = NOT_INIT;
1085
1086 /* only mark the slot as written if all 8 bytes were written
1087 * otherwise read propagation may incorrectly stop too soon
1088 * when stack slots are partially written.
1089 * This heuristic means that read propagation will be
1090 * conservative, since it will add reg_live_read marks
1091 * to stack slots all the way to first state when programs
1092 * writes+reads less than 8 bytes

--- 8 unchanged lines hidden (view full) ---

1101
1102 for (i = 0; i < size; i++)
1103 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1104 type;
1105 }
1106 return 0;
1107}
1108
1059
1060 /* only mark the slot as written if all 8 bytes were written
1061 * otherwise read propagation may incorrectly stop too soon
1062 * when stack slots are partially written.
1063 * This heuristic means that read propagation will be
1064 * conservative, since it will add reg_live_read marks
1065 * to stack slots all the way to first state when programs
1066 * writes+reads less than 8 bytes

--- 8 unchanged lines hidden (view full) ---

1075
1076 for (i = 0; i < size; i++)
1077 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1078 type;
1079 }
1080 return 0;
1081}
1082
1109/* registers of every function are unique and mark_reg_read() propagates
1110 * the liveness in the following cases:
1111 * - from callee into caller for R1 - R5 that were used as arguments
1112 * - from caller into callee for R0 that used as result of the call
1113 * - from caller to the same caller skipping states of the callee for R6 - R9,
1114 * since R6 - R9 are callee saved by implicit function prologue and
1115 * caller's R6 != callee's R6, so when we propagate liveness up to
1116 * parent states we need to skip callee states for R6 - R9.
1117 *
1118 * stack slot marking is different, since stacks of caller and callee are
1119 * accessible in both (since caller can pass a pointer to caller's stack to
1120 * callee which can pass it to another function), hence mark_stack_slot_read()
1121 * has to propagate the stack liveness to all parent states at given frame number.
1122 * Consider code:
1123 * f1() {
1124 * ptr = fp - 8;
1125 * *ptr = ctx;
1126 * call f2 {
1127 * .. = *ptr;
1128 * }
1129 * .. = *ptr;
1130 * }
1131 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
1132 * to mark liveness at the f1's frame and not f2's frame.
1133 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
1134 * to propagate liveness to f2 states at f1's frame level and further into
1135 * f1 states at f1's frame level until write into that stack slot
1136 */
1137static void mark_stack_slot_read(struct bpf_verifier_env *env,
1138 const struct bpf_verifier_state *state,
1139 struct bpf_verifier_state *parent,
1140 int slot, int frameno)
1141{
1142 bool writes = parent == state->parent; /* Observe write marks */
1143
1144 while (parent) {
1145 if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
1146 /* since LIVE_WRITTEN mark is only done for full 8-byte
1147 * write the read marks are conservative and parent
1148 * state may not even have the stack allocated. In such case
1149 * end the propagation, since the loop reached beginning
1150 * of the function
1151 */
1152 break;
1153 /* if read wasn't screened by an earlier write ... */
1154 if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
1155 break;
1156 /* ... then we depend on parent's value */
1157 parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
1158 state = parent;
1159 parent = state->parent;
1160 writes = true;
1161 }
1162}
1163
1164static int check_stack_read(struct bpf_verifier_env *env,
1165 struct bpf_func_state *reg_state /* func where register points to */,
1166 int off, int size, int value_regno)
1167{
1168 struct bpf_verifier_state *vstate = env->cur_state;
1169 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1170 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1171 u8 *stype;

--- 21 unchanged lines hidden (view full) ---

1193 /* restore register state from stack */
1194 state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1195 /* mark reg as written since spilled pointer state likely
1196 * has its liveness marks cleared by is_state_visited()
1197 * which resets stack/reg liveness for state transitions
1198 */
1199 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1200 }
1083static int check_stack_read(struct bpf_verifier_env *env,
1084 struct bpf_func_state *reg_state /* func where register points to */,
1085 int off, int size, int value_regno)
1086{
1087 struct bpf_verifier_state *vstate = env->cur_state;
1088 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1089 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1090 u8 *stype;

--- 21 unchanged lines hidden (view full) ---

1112 /* restore register state from stack */
1113 state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1114 /* mark reg as written since spilled pointer state likely
1115 * has its liveness marks cleared by is_state_visited()
1116 * which resets stack/reg liveness for state transitions
1117 */
1118 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1119 }
1201 mark_stack_slot_read(env, vstate, vstate->parent, spi,
1202 reg_state->frameno);
1120 mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1121 reg_state->stack[spi].spilled_ptr.parent);
1203 return 0;
1204 } else {
1205 int zeros = 0;
1206
1207 for (i = 0; i < size; i++) {
1208 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1209 continue;
1210 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1211 zeros++;
1212 continue;
1213 }
1214 verbose(env, "invalid read from stack off %d+%d size %d\n",
1215 off, i, size);
1216 return -EACCES;
1217 }
1122 return 0;
1123 } else {
1124 int zeros = 0;
1125
1126 for (i = 0; i < size; i++) {
1127 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1128 continue;
1129 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1130 zeros++;
1131 continue;
1132 }
1133 verbose(env, "invalid read from stack off %d+%d size %d\n",
1134 off, i, size);
1135 return -EACCES;
1136 }
1218 mark_stack_slot_read(env, vstate, vstate->parent, spi,
1219 reg_state->frameno);
1137 mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1138 reg_state->stack[spi].spilled_ptr.parent);
1220 if (value_regno >= 0) {
1221 if (zeros == size) {
1222 /* any size read into register is zero extended,
1223 * so the whole register == const_zero
1224 */
1225 __mark_reg_const_zero(&state->regs[value_regno]);
1226 } else {
1227 /* have read misc data from the stack */

--- 88 unchanged lines hidden (view full) ---

1316 return false;
1317 /* fallthrough */
1318 case BPF_PROG_TYPE_SCHED_CLS:
1319 case BPF_PROG_TYPE_SCHED_ACT:
1320 case BPF_PROG_TYPE_XDP:
1321 case BPF_PROG_TYPE_LWT_XMIT:
1322 case BPF_PROG_TYPE_SK_SKB:
1323 case BPF_PROG_TYPE_SK_MSG:
1139 if (value_regno >= 0) {
1140 if (zeros == size) {
1141 /* any size read into register is zero extended,
1142 * so the whole register == const_zero
1143 */
1144 __mark_reg_const_zero(&state->regs[value_regno]);
1145 } else {
1146 /* have read misc data from the stack */

--- 88 unchanged lines hidden (view full) ---

1235 return false;
1236 /* fallthrough */
1237 case BPF_PROG_TYPE_SCHED_CLS:
1238 case BPF_PROG_TYPE_SCHED_ACT:
1239 case BPF_PROG_TYPE_XDP:
1240 case BPF_PROG_TYPE_LWT_XMIT:
1241 case BPF_PROG_TYPE_SK_SKB:
1242 case BPF_PROG_TYPE_SK_MSG:
1243 case BPF_PROG_TYPE_FLOW_DISSECTOR:
1324 if (meta)
1325 return meta->pkt_access;
1326
1327 env->seen_direct_write = true;
1328 return true;
1329 default:
1330 return false;
1331 }

--- 67 unchanged lines hidden (view full) ---

1399 env->prog->aux->max_ctx_offset = off + size;
1400 return 0;
1401 }
1402
1403 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1404 return -EACCES;
1405}
1406
1244 if (meta)
1245 return meta->pkt_access;
1246
1247 env->seen_direct_write = true;
1248 return true;
1249 default:
1250 return false;
1251 }

--- 67 unchanged lines hidden (view full) ---

1319 env->prog->aux->max_ctx_offset = off + size;
1320 return 0;
1321 }
1322
1323 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1324 return -EACCES;
1325}
1326
1327static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1328 int size)
1329{
1330 if (size < 0 || off < 0 ||
1331 (u64)off + size > sizeof(struct bpf_flow_keys)) {
1332 verbose(env, "invalid access to flow keys off=%d size=%d\n",
1333 off, size);
1334 return -EACCES;
1335 }
1336 return 0;
1337}
1338
1407static bool __is_pointer_value(bool allow_ptr_leaks,
1408 const struct bpf_reg_state *reg)
1409{
1410 if (allow_ptr_leaks)
1411 return false;
1412
1413 return reg->type != SCALAR_VALUE;
1414}

--- 85 unchanged lines hidden (view full) ---

1500
1501 switch (reg->type) {
1502 case PTR_TO_PACKET:
1503 case PTR_TO_PACKET_META:
1504 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1505 * right in front, treat it the very same way.
1506 */
1507 return check_pkt_ptr_alignment(env, reg, off, size, strict);
1339static bool __is_pointer_value(bool allow_ptr_leaks,
1340 const struct bpf_reg_state *reg)
1341{
1342 if (allow_ptr_leaks)
1343 return false;
1344
1345 return reg->type != SCALAR_VALUE;
1346}

--- 85 unchanged lines hidden (view full) ---

1432
1433 switch (reg->type) {
1434 case PTR_TO_PACKET:
1435 case PTR_TO_PACKET_META:
1436 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1437 * right in front, treat it the very same way.
1438 */
1439 return check_pkt_ptr_alignment(env, reg, off, size, strict);
1440 case PTR_TO_FLOW_KEYS:
1441 pointer_desc = "flow keys ";
1442 break;
1508 case PTR_TO_MAP_VALUE:
1509 pointer_desc = "value ";
1510 break;
1511 case PTR_TO_CTX:
1512 pointer_desc = "context ";
1513 break;
1514 case PTR_TO_STACK:
1515 pointer_desc = "stack ";

--- 206 unchanged lines hidden (view full) ---

1722 * PTR_TO_PACKET[_META,_END]. In the latter
1723 * case, we know the offset is zero.
1724 */
1725 if (reg_type == SCALAR_VALUE)
1726 mark_reg_unknown(env, regs, value_regno);
1727 else
1728 mark_reg_known_zero(env, regs,
1729 value_regno);
1443 case PTR_TO_MAP_VALUE:
1444 pointer_desc = "value ";
1445 break;
1446 case PTR_TO_CTX:
1447 pointer_desc = "context ";
1448 break;
1449 case PTR_TO_STACK:
1450 pointer_desc = "stack ";

--- 206 unchanged lines hidden (view full) ---

1657 * PTR_TO_PACKET[_META,_END]. In the latter
1658 * case, we know the offset is zero.
1659 */
1660 if (reg_type == SCALAR_VALUE)
1661 mark_reg_unknown(env, regs, value_regno);
1662 else
1663 mark_reg_known_zero(env, regs,
1664 value_regno);
1730 regs[value_regno].id = 0;
1731 regs[value_regno].off = 0;
1732 regs[value_regno].range = 0;
1733 regs[value_regno].type = reg_type;
1734 }
1735
1736 } else if (reg->type == PTR_TO_STACK) {
1737 /* stack accesses must be at a fixed offset, so that we can
1738 * determine what type of data were returned.
1739 * See check_stack_read().
1740 */

--- 32 unchanged lines hidden (view full) ---

1773 is_pointer_value(env, value_regno)) {
1774 verbose(env, "R%d leaks addr into packet\n",
1775 value_regno);
1776 return -EACCES;
1777 }
1778 err = check_packet_access(env, regno, off, size, false);
1779 if (!err && t == BPF_READ && value_regno >= 0)
1780 mark_reg_unknown(env, regs, value_regno);
1665 regs[value_regno].type = reg_type;
1666 }
1667
1668 } else if (reg->type == PTR_TO_STACK) {
1669 /* stack accesses must be at a fixed offset, so that we can
1670 * determine what type of data were returned.
1671 * See check_stack_read().
1672 */

--- 32 unchanged lines hidden (view full) ---

1705 is_pointer_value(env, value_regno)) {
1706 verbose(env, "R%d leaks addr into packet\n",
1707 value_regno);
1708 return -EACCES;
1709 }
1710 err = check_packet_access(env, regno, off, size, false);
1711 if (!err && t == BPF_READ && value_regno >= 0)
1712 mark_reg_unknown(env, regs, value_regno);
1713 } else if (reg->type == PTR_TO_FLOW_KEYS) {
1714 if (t == BPF_WRITE && value_regno >= 0 &&
1715 is_pointer_value(env, value_regno)) {
1716 verbose(env, "R%d leaks addr into flow keys\n",
1717 value_regno);
1718 return -EACCES;
1719 }
1720
1721 err = check_flow_keys_access(env, off, size);
1722 if (!err && t == BPF_READ && value_regno >= 0)
1723 mark_reg_unknown(env, regs, value_regno);
1781 } else {
1782 verbose(env, "R%d invalid mem access '%s'\n", regno,
1783 reg_type_str[reg->type]);
1784 return -EACCES;
1785 }
1786
1787 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1788 regs[value_regno].type == SCALAR_VALUE) {

--- 114 unchanged lines hidden (view full) ---

1903err:
1904 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1905 off, i, access_size);
1906 return -EACCES;
1907mark:
1908 /* reading any byte out of 8-byte 'spill_slot' will cause
1909 * the whole slot to be marked as 'read'
1910 */
1724 } else {
1725 verbose(env, "R%d invalid mem access '%s'\n", regno,
1726 reg_type_str[reg->type]);
1727 return -EACCES;
1728 }
1729
1730 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1731 regs[value_regno].type == SCALAR_VALUE) {

--- 114 unchanged lines hidden (view full) ---

1846err:
1847 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1848 off, i, access_size);
1849 return -EACCES;
1850mark:
1851 /* reading any byte out of 8-byte 'spill_slot' will cause
1852 * the whole slot to be marked as 'read'
1853 */
1911 mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
1912 spi, state->frameno);
1854 mark_reg_read(env, &state->stack[spi].spilled_ptr,
1855 state->stack[spi].spilled_ptr.parent);
1913 }
1914 return update_stack_depth(env, state, off);
1915}
1916
1917static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1918 int access_size, bool zero_size_allowed,
1919 struct bpf_call_arg_meta *meta)
1920{
1921 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1922
1923 switch (reg->type) {
1924 case PTR_TO_PACKET:
1925 case PTR_TO_PACKET_META:
1926 return check_packet_access(env, regno, reg->off, access_size,
1927 zero_size_allowed);
1856 }
1857 return update_stack_depth(env, state, off);
1858}
1859
1860static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1861 int access_size, bool zero_size_allowed,
1862 struct bpf_call_arg_meta *meta)
1863{
1864 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1865
1866 switch (reg->type) {
1867 case PTR_TO_PACKET:
1868 case PTR_TO_PACKET_META:
1869 return check_packet_access(env, regno, reg->off, access_size,
1870 zero_size_allowed);
1871 case PTR_TO_FLOW_KEYS:
1872 return check_flow_keys_access(env, reg->off, access_size);
1928 case PTR_TO_MAP_VALUE:
1929 return check_map_access(env, regno, reg->off, access_size,
1930 zero_size_allowed);
1931 default: /* scalar_value|ptr_to_stack or invalid ptr */
1932 return check_stack_boundary(env, regno, access_size,
1933 zero_size_allowed, meta);
1934 }
1935}

--- 425 unchanged lines hidden (view full) ---

2361 * callee can read/write into caller's stack
2362 */
2363 init_func_state(env, callee,
2364 /* remember the callsite, it will be used by bpf_exit */
2365 *insn_idx /* callsite */,
2366 state->curframe + 1 /* frameno within this callchain */,
2367 subprog /* subprog number within this prog */);
2368
1873 case PTR_TO_MAP_VALUE:
1874 return check_map_access(env, regno, reg->off, access_size,
1875 zero_size_allowed);
1876 default: /* scalar_value|ptr_to_stack or invalid ptr */
1877 return check_stack_boundary(env, regno, access_size,
1878 zero_size_allowed, meta);
1879 }
1880}

--- 425 unchanged lines hidden (view full) ---

2306 * callee can read/write into caller's stack
2307 */
2308 init_func_state(env, callee,
2309 /* remember the callsite, it will be used by bpf_exit */
2310 *insn_idx /* callsite */,
2311 state->curframe + 1 /* frameno within this callchain */,
2312 subprog /* subprog number within this prog */);
2313
2369 /* copy r1 - r5 args that callee can access */
2314 /* copy r1 - r5 args that callee can access. The copy includes parent
2315 * pointers, which connects us up to the liveness chain
2316 */
2370 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2371 callee->regs[i] = caller->regs[i];
2372
2317 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2318 callee->regs[i] = caller->regs[i];
2319
2373 /* after the call regsiters r0 - r5 were scratched */
2320 /* after the call registers r0 - r5 were scratched */
2374 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2375 mark_reg_not_init(env, caller->regs, caller_saved[i]);
2376 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2377 }
2378
2379 /* only increment it after check_reg_arg() finished */
2380 state->curframe++;
2381

--- 193 unchanged lines hidden (view full) ---

2575 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2576 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2577 if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
2578 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2579 else
2580 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2581 /* There is no offset yet applied, variable or fixed */
2582 mark_reg_known_zero(env, regs, BPF_REG_0);
2321 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2322 mark_reg_not_init(env, caller->regs, caller_saved[i]);
2323 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2324 }
2325
2326 /* only increment it after check_reg_arg() finished */
2327 state->curframe++;
2328

--- 193 unchanged lines hidden (view full) ---

2522 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2523 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2524 if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
2525 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2526 else
2527 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2528 /* There is no offset yet applied, variable or fixed */
2529 mark_reg_known_zero(env, regs, BPF_REG_0);
2583 regs[BPF_REG_0].off = 0;
2584 /* remember map_ptr, so that check_map_access()
2585 * can check 'value_size' boundary of memory access
2586 * to map element returned from bpf_map_lookup_elem()
2587 */
2588 if (meta.map_ptr == NULL) {
2589 verbose(env,
2590 "kernel subsystem misconfigured verifier\n");
2591 return -EINVAL;

--- 1773 unchanged lines hidden (view full) ---

4365 struct idpair *idmap)
4366{
4367 bool equal;
4368
4369 if (!(rold->live & REG_LIVE_READ))
4370 /* explored state didn't use this */
4371 return true;
4372
2530 /* remember map_ptr, so that check_map_access()
2531 * can check 'value_size' boundary of memory access
2532 * to map element returned from bpf_map_lookup_elem()
2533 */
2534 if (meta.map_ptr == NULL) {
2535 verbose(env,
2536 "kernel subsystem misconfigured verifier\n");
2537 return -EINVAL;

--- 1773 unchanged lines hidden (view full) ---

4311 struct idpair *idmap)
4312{
4313 bool equal;
4314
4315 if (!(rold->live & REG_LIVE_READ))
4316 /* explored state didn't use this */
4317 return true;
4318
4373 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
4319 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
4374
4375 if (rold->type == PTR_TO_STACK)
4376 /* two stack pointers are equal only if they're pointing to
4377 * the same stack frame, since fp-8 in foo != fp-8 in bar
4378 */
4379 return equal && rold->frameno == rcur->frameno;
4380
4381 if (equal)

--- 64 unchanged lines hidden (view full) ---

4446 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
4447 return false;
4448 /* new val must satisfy old val knowledge */
4449 return range_within(rold, rcur) &&
4450 tnum_in(rold->var_off, rcur->var_off);
4451 case PTR_TO_CTX:
4452 case CONST_PTR_TO_MAP:
4453 case PTR_TO_PACKET_END:
4320
4321 if (rold->type == PTR_TO_STACK)
4322 /* two stack pointers are equal only if they're pointing to
4323 * the same stack frame, since fp-8 in foo != fp-8 in bar
4324 */
4325 return equal && rold->frameno == rcur->frameno;
4326
4327 if (equal)

--- 64 unchanged lines hidden (view full) ---

4392 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
4393 return false;
4394 /* new val must satisfy old val knowledge */
4395 return range_within(rold, rcur) &&
4396 tnum_in(rold->var_off, rcur->var_off);
4397 case PTR_TO_CTX:
4398 case CONST_PTR_TO_MAP:
4399 case PTR_TO_PACKET_END:
4400 case PTR_TO_FLOW_KEYS:
4454 /* Only valid matches are exact, which memcmp() above
4455 * would have accepted
4456 */
4457 default:
4458 /* Don't know what's going on, just say it's not safe */
4459 return false;
4460 }
4461

--- 136 unchanged lines hidden (view full) ---

4598 return true;
4599}
4600
4601/* A write screens off any subsequent reads; but write marks come from the
4602 * straight-line code between a state and its parent. When we arrive at an
4603 * equivalent state (jump target or such) we didn't arrive by the straight-line
4604 * code, so read marks in the state must propagate to the parent regardless
4605 * of the state's write marks. That's what 'parent == state->parent' comparison
4401 /* Only valid matches are exact, which memcmp() above
4402 * would have accepted
4403 */
4404 default:
4405 /* Don't know what's going on, just say it's not safe */
4406 return false;
4407 }
4408

--- 136 unchanged lines hidden (view full) ---

4545 return true;
4546}
4547
4548/* A write screens off any subsequent reads; but write marks come from the
4549 * straight-line code between a state and its parent. When we arrive at an
4550 * equivalent state (jump target or such) we didn't arrive by the straight-line
4551 * code, so read marks in the state must propagate to the parent regardless
4552 * of the state's write marks. That's what 'parent == state->parent' comparison
4606 * in mark_reg_read() and mark_stack_slot_read() is for.
4553 * in mark_reg_read() is for.
4607 */
4608static int propagate_liveness(struct bpf_verifier_env *env,
4609 const struct bpf_verifier_state *vstate,
4610 struct bpf_verifier_state *vparent)
4611{
4612 int i, frame, err = 0;
4613 struct bpf_func_state *state, *parent;
4614

--- 4 unchanged lines hidden (view full) ---

4619 }
4620 /* Propagate read liveness of registers... */
4621 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
4622 /* We don't need to worry about FP liveness because it's read-only */
4623 for (i = 0; i < BPF_REG_FP; i++) {
4624 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
4625 continue;
4626 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
4554 */
4555static int propagate_liveness(struct bpf_verifier_env *env,
4556 const struct bpf_verifier_state *vstate,
4557 struct bpf_verifier_state *vparent)
4558{
4559 int i, frame, err = 0;
4560 struct bpf_func_state *state, *parent;
4561

--- 4 unchanged lines hidden (view full) ---

4566 }
4567 /* Propagate read liveness of registers... */
4568 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
4569 /* We don't need to worry about FP liveness because it's read-only */
4570 for (i = 0; i < BPF_REG_FP; i++) {
4571 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
4572 continue;
4573 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
4627 err = mark_reg_read(env, vstate, vparent, i);
4574 err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
4575 &vparent->frame[vstate->curframe]->regs[i]);
4628 if (err)
4629 return err;
4630 }
4631 }
4632
4633 /* ... and stack slots */
4634 for (frame = 0; frame <= vstate->curframe; frame++) {
4635 state = vstate->frame[frame];
4636 parent = vparent->frame[frame];
4637 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
4638 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
4639 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
4640 continue;
4641 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
4576 if (err)
4577 return err;
4578 }
4579 }
4580
4581 /* ... and stack slots */
4582 for (frame = 0; frame <= vstate->curframe; frame++) {
4583 state = vstate->frame[frame];
4584 parent = vparent->frame[frame];
4585 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
4586 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
4587 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
4588 continue;
4589 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
4642 mark_stack_slot_read(env, vstate, vparent, i, frame);
4590 mark_reg_read(env, &state->stack[i].spilled_ptr,
4591 &parent->stack[i].spilled_ptr);
4643 }
4644 }
4645 return err;
4646}
4647
4648static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
4649{
4650 struct bpf_verifier_state_list *new_sl;
4651 struct bpf_verifier_state_list *sl;
4592 }
4593 }
4594 return err;
4595}
4596
4597static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
4598{
4599 struct bpf_verifier_state_list *new_sl;
4600 struct bpf_verifier_state_list *sl;
4652 struct bpf_verifier_state *cur = env->cur_state;
4601 struct bpf_verifier_state *cur = env->cur_state, *new;
4653 int i, j, err;
4654
4655 sl = env->explored_states[insn_idx];
4656 if (!sl)
4657 /* this 'insn_idx' instruction wasn't marked, so we will not
4658 * be doing state search here
4659 */
4660 return 0;

--- 25 unchanged lines hidden (view full) ---

4686 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
4687 * again on the way to bpf_exit
4688 */
4689 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
4690 if (!new_sl)
4691 return -ENOMEM;
4692
4693 /* add new state to the head of linked list */
4602 int i, j, err;
4603
4604 sl = env->explored_states[insn_idx];
4605 if (!sl)
4606 /* this 'insn_idx' instruction wasn't marked, so we will not
4607 * be doing state search here
4608 */
4609 return 0;

--- 25 unchanged lines hidden (view full) ---

4635 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
4636 * again on the way to bpf_exit
4637 */
4638 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
4639 if (!new_sl)
4640 return -ENOMEM;
4641
4642 /* add new state to the head of linked list */
4694 err = copy_verifier_state(&new_sl->state, cur);
4643 new = &new_sl->state;
4644 err = copy_verifier_state(new, cur);
4695 if (err) {
4645 if (err) {
4696 free_verifier_state(&new_sl->state, false);
4646 free_verifier_state(new, false);
4697 kfree(new_sl);
4698 return err;
4699 }
4700 new_sl->next = env->explored_states[insn_idx];
4701 env->explored_states[insn_idx] = new_sl;
4702 /* connect new state to parentage chain */
4647 kfree(new_sl);
4648 return err;
4649 }
4650 new_sl->next = env->explored_states[insn_idx];
4651 env->explored_states[insn_idx] = new_sl;
4652 /* connect new state to parentage chain */
4703 cur->parent = &new_sl->state;
4653 for (i = 0; i < BPF_REG_FP; i++)
4654 cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
4704 /* clear write marks in current state: the writes we did are not writes
4705 * our child did, so they don't screen off its reads from us.
4706 * (There are no read marks in current state, because reads always mark
4707 * their parent and current state never has children yet. Only
4708 * explored_states can get read marks.)
4709 */
4710 for (i = 0; i < BPF_REG_FP; i++)
4711 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
4712
4713 /* all stack frames are accessible from callee, clear them all */
4714 for (j = 0; j <= cur->curframe; j++) {
4715 struct bpf_func_state *frame = cur->frame[j];
4655 /* clear write marks in current state: the writes we did are not writes
4656 * our child did, so they don't screen off its reads from us.
4657 * (There are no read marks in current state, because reads always mark
4658 * their parent and current state never has children yet. Only
4659 * explored_states can get read marks.)
4660 */
4661 for (i = 0; i < BPF_REG_FP; i++)
4662 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
4663
4664 /* all stack frames are accessible from callee, clear them all */
4665 for (j = 0; j <= cur->curframe; j++) {
4666 struct bpf_func_state *frame = cur->frame[j];
4667 struct bpf_func_state *newframe = new->frame[j];
4716
4668
4717 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
4669 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
4718 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
4670 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
4671 frame->stack[i].spilled_ptr.parent =
4672 &newframe->stack[i].spilled_ptr;
4673 }
4719 }
4720 return 0;
4721}
4722
4723static int do_check(struct bpf_verifier_env *env)
4724{
4725 struct bpf_verifier_state *state;
4726 struct bpf_insn *insns = env->prog->insnsi;
4727 struct bpf_reg_state *regs;
4728 int insn_cnt = env->prog->len, i;
4729 int insn_idx, prev_insn_idx = 0;
4730 int insn_processed = 0;
4731 bool do_print_state = false;
4732
4733 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
4734 if (!state)
4735 return -ENOMEM;
4736 state->curframe = 0;
4674 }
4675 return 0;
4676}
4677
4678static int do_check(struct bpf_verifier_env *env)
4679{
4680 struct bpf_verifier_state *state;
4681 struct bpf_insn *insns = env->prog->insnsi;
4682 struct bpf_reg_state *regs;
4683 int insn_cnt = env->prog->len, i;
4684 int insn_idx, prev_insn_idx = 0;
4685 int insn_processed = 0;
4686 bool do_print_state = false;
4687
4688 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
4689 if (!state)
4690 return -ENOMEM;
4691 state->curframe = 0;
4737 state->parent = NULL;
4738 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
4739 if (!state->frame[0]) {
4740 kfree(state);
4741 return -ENOMEM;
4742 }
4743 env->cur_state = state;
4744 init_func_state(env, state->frame[0],
4745 BPF_MAIN_FUNC /* callsite */,

--- 466 unchanged lines hidden (view full) ---

5212 * These pointers will be used later by verifier to validate map access.
5213 */
5214 return 0;
5215}
5216
5217/* drop refcnt of maps used by the rejected program */
5218static void release_maps(struct bpf_verifier_env *env)
5219{
4692 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
4693 if (!state->frame[0]) {
4694 kfree(state);
4695 return -ENOMEM;
4696 }
4697 env->cur_state = state;
4698 init_func_state(env, state->frame[0],
4699 BPF_MAIN_FUNC /* callsite */,

--- 466 unchanged lines hidden (view full) ---

5166 * These pointers will be used later by verifier to validate map access.
5167 */
5168 return 0;
5169}
5170
5171/* drop refcnt of maps used by the rejected program */
5172static void release_maps(struct bpf_verifier_env *env)
5173{
5174 enum bpf_cgroup_storage_type stype;
5220 int i;
5221
5175 int i;
5176
5222 if (env->prog->aux->cgroup_storage)
5177 for_each_cgroup_storage_type(stype) {
5178 if (!env->prog->aux->cgroup_storage[stype])
5179 continue;
5223 bpf_cgroup_storage_release(env->prog,
5180 bpf_cgroup_storage_release(env->prog,
5224 env->prog->aux->cgroup_storage);
5181 env->prog->aux->cgroup_storage[stype]);
5182 }
5225
5226 for (i = 0; i < env->used_map_cnt; i++)
5227 bpf_map_put(env->used_maps[i]);
5228}
5229
5230/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
5231static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
5232{

--- 794 unchanged lines hidden ---
5183
5184 for (i = 0; i < env->used_map_cnt; i++)
5185 bpf_map_put(env->used_maps[i]);
5186}
5187
5188/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
5189static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
5190{

--- 794 unchanged lines hidden ---