/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "main.h"

/* Find the insn meta for @insn_idx.  The meta list mirrors the program's
 * instructions; walk it from whichever known position (the previously
 * visited meta, the list tail or the list head) needs the fewest steps.
 */
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

/* XDP return codes need no checking here.  For cls_bpf offload the return
 * value must be a known constant, and only a subset of the TC_ACT_* codes
 * is accepted.
 */
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_info("unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_info("unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

/* Stack pointers must have a constant offset.  If the offset seen for a
 * given load/store changes between verifier walks, the instruction is
 * still accepted as long as the old and new offsets address the same byte
 * within a 32-bit word.
 */
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_info("variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_info("stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

/* Only context, stack and packet pointers are supported, and the pointer
 * type seen by a given memory instruction must not change between
 * verifier walks.
 */
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_PACKET) {
		pr_info("unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
		if (err)
			return err;
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_info("ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

/* Per-instruction callback invoked by the core verifier.  Locate the meta
 * for the instruction being verified and record/validate the register
 * state the translator will later rely on.
 */
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_err("program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.dst_reg);

	return 0;
}

/* Hook structure handed to the core verifier by the driver's offload
 * preparation code; the verifier calls ->insn_hook for every instruction
 * of a program being offloaded.
 */
const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};