/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
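/* Usage sketch (illustrative, not part of the module): filters of kind
 * "bpf" are attached with tc(8); the device, object and section names
 * below are assumptions for the example. An eBPF filter in direct-action
 * mode (TCA_BPF_FLAG_ACT_DIRECT) could be attached roughly like:
 *
 *	tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 * and a classic BPF filter as raw bytecode:
 *
 *	tc filter add dev eth0 parent 1: bpf bytecode '...' classid 1:1
 */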
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
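/* Main classification path: walk the filter list under RCU and run each
 * attached BPF program on the skb. At ingress the MAC header has already
 * been pulled, so it is pushed back for the program run and pulled again
 * afterwards. In direct-action mode (exts_integrated) the program returns
 * a TC_ACT_* opcode and supplies the classid via skb->cb; in classic mode
 * a return of 0 means mismatch, -1 selects the filter's configured default
 * classid, and any other value is taken as the classid itself.
 */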
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}
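/* Two mutually exclusive loader paths fill in cls_bpf_prog::filter:
 * classic BPF, where TCA_BPF_OPS carries an array of struct sock_filter
 * that is translated (and possibly JITed) via bpf_prog_create(), and
 * eBPF, where TCA_BPF_FD refers to a program of type
 * BPF_PROG_TYPE_SCHED_CLS already loaded through the bpf(2) syscall.
 */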
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}
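/* Validate and apply a change request: exactly one of the classic ops
 * attributes or the eBPF fd attribute must be present, extended actions
 * are validated before the program is loaded, and
 * TCA_BPF_FLAG_ACT_DIRECT switches the filter into direct-action mode.
 */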
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			tcf_exts_destroy(&exts);
			return -EINVAL;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}

	prog->exts_integrated = have_exts;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;
}

static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
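/* For reference, a sketch of what a direct-action classifier looks like
 * from the program side; this is illustrative userspace-built code, not
 * part of this module, and the section name is an assumption for tc(8):
 *
 *	// compiled with: clang -O2 -target bpf -c prog.c -o prog.o
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("classifier"), used))
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		// OR'd with the filter's major handle in cls_bpf_classify()
 *		skb->tc_classid = 0x10;
 *		return TC_ACT_OK;	// mapped by cls_bpf_exec_opcode()
 *	}
 */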