/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>

/**
 * sk_filter - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by SK_RUN_FILTER. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to SK_RUN_FILTER. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
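
/* Illustrative sketch, not part of this file: sk_filter() is normally
 * reached after user space attaches a classic BPF program with the
 * SO_ATTACH_FILTER socket option. A minimal filter on a packet socket
 * that accepts IPv4 frames and drops everything else could look roughly
 * like this (user space, error handling omitted):
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * A non-zero filter return keeps up to that many bytes of the packet;
 * a return of 0 makes sk_filter() report -EPERM and the packet is tossed.
 */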

static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

/* note that this only generates 32-bit random numbers */
static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return prandom_u32();
}
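
/* Illustrative sketch, not part of this file: the helpers above back the
 * classic BPF ancillary loads (SKF_AD_*). A classic program reaches them
 * with a BPF_LD | BPF_ABS load at offset SKF_AD_OFF + <extension>, with A
 * and X carrying the arguments. For SKF_AD_NLATTR as handled by
 * __skb_get_nlattr() (A = offset to start searching at, X = attribute
 * type, result in A = offset of the attribute, or 0 if not found), a
 * fragment could look like (the attribute type 7 is only an example):
 *
 *	BPF_STMT(BPF_LD  | BPF_IMM, 0),
 *	BPF_STMT(BPF_LDX | BPF_IMM, 7),
 *	BPF_STMT(BPF_LD  | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_NLATTR),
 */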

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
				      PKT_TYPE_OFFSET());
		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		insn++;
		*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
#endif
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_tci));
		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
					      ~VLAN_TAG_PRESENT);
		} else {
			/* A >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
			/* A &= 1 */
			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
		}
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(__get_random_u32);
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
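
/* Illustrative sketch, not part of this file: following the SKF_AD_CPU
 * case above, a classic ancillary load such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * is expanded by convert_bpf_extensions() into the sequence
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX)
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A)
 *	BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X)
 *	BPF_EMIT_CALL(__get_raw_cpu_id)
 *
 * i.e. the extension becomes an ordinary helper call with CTX, A and X
 * passed as the first three arguments.
 */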

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 *
 * User BPF's register A is mapped to our BPF register 6, user BPF
 * register X is mapped to BPF register 7; frame pointer is always
 * register 10; Context 'void *ctx' is stored in register 1, that is,
 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
 * ctx == 'struct seccomp_data *'.
 */
int bpf_convert_filter(struct sock_filter *prog, int len,
		       struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	if (new_insn)
		*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	new_insn++;

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

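		/* Illustrative note, not part of this file: BPF_EMIT_JMP
		 * turns a classic jump to instruction 'target' into an eBPF
		 * pc-relative offset. As a worked example, suppose classic
		 * insn i starts at new position addrs[i] = 10 and the target
		 * at addrs[target] = 14; since eBPF offsets are relative to
		 * the insn following the jump, the emitted offset is
		 * 14 - 10 - 1 = 3. If the jump is the 2nd or 3rd insn emitted
		 * for this classic insn (insn - tmp_insns > 0), the offset is
		 * reduced by that amount, because addrs[i] points at the
		 * first insn emitted for the group.
		 */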
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_X;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}
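
/* Illustrative sketch, not part of this file: for the shortest possible
 * classic program
 *
 *	BPF_STMT(BPF_RET | BPF_K, 0xffff)
 *
 * the converter above emits the prologue plus the two-insn RET mapping:
 *
 *	BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1)
 *	BPF_MOV32_RAW(BPF_K, BPF_REG_0, BPF_REG_A, 0xffff)
 *	BPF_EXIT_INSN()
 *
 * so the first (length-only) pass reports new_len == 3.
 */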

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that the filter loaded by the user never
 * tries to read a cell that was not previously written, and we check all
 * branches to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
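
/* Illustrative sketch, not part of this file: check_load_and_stores()
 * rejects a classic program that reads a scratch memory cell before it was
 * written, e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),		reads M[3], never written
 *	BPF_STMT(BPF_RET | BPF_K, 0),		-> -EINVAL
 *
 * while the store-then-load variant passes:
 *
 *	BPF_STMT(BPF_ST, 3),			M[3] = A
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),		A = M[3]
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 */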

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
EXPORT_SYMBOL(bpf_check_classic);
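
/* Illustrative sketch, not part of this file: examples of programs that
 * bpf_check_classic() refuses with -EINVAL. A filter whose last
 * instruction is not a RET, a conditional jump that lands past the end,
 * or a division by a zero constant are all rejected:
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0)			no trailing RET
 *
 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 5),		jf past flen
 *	BPF_STMT(BPF_RET | BPF_K, 0)
 *
 *	BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),			division by zero
 *	BPF_STMT(BPF_RET | BPF_K, 0)
 */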

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;
	fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = false;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
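
/* Illustrative sketch, not part of this file: an in-kernel user (a packet
 * classifier, for instance) could build an unattached filter roughly like
 * this; the trivial program and the calling context are made up for the
 * example:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD  | BPF_W | BPF_LEN, 0),
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err;
 *
 *	err = bpf_prog_create(&prog, &fprog);
 *	if (err)
 *		return err;
 *	... run the program against packets, and when done ...
 *	bpf_prog_destroy(prog);
 */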

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return -ENOMEM;

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return -EFAULT;
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return -ENOMEM;
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	prog = bpf_prepare_filter(prog);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp) {
		__bpf_prog_release(prog);
		return -ENOMEM;
	}
	fp->prog = prog;

	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		__sk_filter_release(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

#ifdef CONFIG_BPF_SYSCALL
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	struct bpf_prog *prog;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
		/* valid fd, but invalid program type */
		bpf_prog_put(prog);
		return -EINVAL;
	}

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp) {
		bpf_prog_put(prog);
		return -ENOMEM;
	}
	fp->prog = prog;

	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		__sk_filter_release(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
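
/* Illustrative sketch, not part of this file: sk_attach_bpf() receives a
 * file descriptor that user space obtained from the bpf() syscall for a
 * BPF_PROG_TYPE_SOCKET_FILTER program, typically handed in through the
 * SO_ATTACH_BPF socket option (user space, attr contents and error
 * handling omitted):
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
 *		   &prog_fd, sizeof(prog_fd));
 */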

/* allow socket filters to call
 * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
 */
static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		return NULL;
	}
}

static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* skb fields cannot be accessed yet */
	return false;
}

static struct bpf_verifier_ops sock_filter_ops = {
	.get_func_proto = sock_filter_func_proto,
	.is_valid_access = sock_filter_is_valid_access,
};

static struct bpf_prog_type_list tl = {
	.ops = &sock_filter_ops,
	.type = BPF_PROG_TYPE_SOCKET_FILTER,
};

static int __init register_sock_filter_ops(void)
{
	bpf_register_prog_type(&tl);
	return 0;
}
late_initcall(register_sock_filter_ops);
#else
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	return -EOPNOTSUPP;
}
#endif

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore.
	 */
	fprog = filter->prog->orig_prog;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
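
/* Illustrative sketch, not part of this file: sk_get_filter() sits behind
 * the SO_GET_FILTER getsockopt(). As the comments above describe, the
 * length is counted in filter blocks rather than bytes, so user space
 * typically asks twice (error handling omitted):
 *
 *	socklen_t cnt = 0;
 *
 *	A zero length only reports the number of filter blocks:
 *		getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &cnt);
 *	A second call fetches the originally attached classic program:
 *		struct sock_filter *code = calloc(cnt, sizeof(*code));
 *		getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, code, &cnt);
 */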