Lines Matching refs:BPF_REG_5
2091 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
3801 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { in backtrack_insn()
3827 for (i = BPF_REG_1; i <= BPF_REG_5; i++) in backtrack_insn()
3861 for (i = BPF_REG_1; i <= BPF_REG_5; i++) in backtrack_insn()
7383 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); in check_kfunc_mem_size_reg()
9400 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9413 for (i = BPF_REG_1; i <= BPF_REG_5; i++) in set_callee_state()
9463 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
9494 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9522 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9544 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
9577 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
9852 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5]; in check_bpf_snprintf_call()
18855 node_offset_reg = BPF_REG_5; in fixup_kfunc_call()
19145 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); in do_misc_fixups()
19147 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); in do_misc_fixups()
19604 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { in do_check_common()