/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

/* Sanitize the verdict of a direct-action program: pass known TC opcodes
 * through unchanged, map anything else to TC_ACT_UNSPEC so classification
 * continues with the next filter in the list.
 */
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			/* Software path skipped; report "continue" in
			 * direct-action mode, "no match" otherwise.
			 */
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

/* Only classic BPF programs carry a sock_filter ops array; eBPF programs
 * arrive via fd (TCA_BPF_FD) and leave bpf_ops NULL.
 */
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

/* Pick an unused handle in [1, 0x7FFFFFFE]; 0 signals exhaustion. */
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
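
/*
 * Illustrative usage sketch (not part of this module; the device "eth0",
 * file names and function names below are hypothetical). A minimal eBPF
 * classifier of type BPF_PROG_TYPE_SCHED_CLS could be handed to this
 * classifier via TCA_BPF_FD, e.g. through iproute2:
 *
 *	clang -O2 -target bpf -c sample_cls.c -o sample_cls.o
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf da obj sample_cls.o sec classifier
 *
 * The "da" (direct-action) flag sets TCA_BPF_FLAG_ACT_DIRECT, i.e.
 * prog->exts_integrated above, so the program's return value is fed
 * straight into cls_bpf_exec_opcode() as a TC verdict. A corresponding
 * sample_cls.c might look like:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("classifier"), used))
 *	int sample_cls(struct __sk_buff *skb)
 *	{
 *		// Drop runt frames, let everything else continue to the
 *		// next filter (TC_ACT_UNSPEC keeps the list walk going).
 *		if (skb->len < 14)
 *			return TC_ACT_SHOT;
 *		return TC_ACT_UNSPEC;
 *	}
 *
 *	char __license[] __attribute__((section("license"), used)) = "GPL";
 */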