/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
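
/* Run each attached program over the skb, in list order.  For
 * direct-action (exts_integrated) filters the program's return code is
 * mapped to a TC verdict via cls_bpf_exec_opcode(); otherwise a return
 * of 0 means no match, -1 selects the default classid configured via
 * TCA_BPF_CLASSID, and any other value is used as the classid itself.
 */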
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;
	cls_bpf.gen_flags = obj->gen_flags;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog);
			return err;
		} else if (err > 0) {
			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}
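
/* Instance setup and teardown.  The per-tp head and the programs hung
 * off it are RCU-protected; the final free is deferred through
 * call_rcu() to a workqueue so that tcf_exts_destroy() runs with the
 * rtnl lock held.
 */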
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
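
/* eBPF case: the program was loaded beforehand via bpf(2) and is
 * referenced here by file descriptor; take a reference on the bpf_prog
 * and keep an optional TCA_BPF_NAME around for dumping.
 */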
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
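
/* Create or replace a filter instance.  On replace, the new program is
 * swapped in under RCU and the old one is released after a grace
 * period.  From userspace this is typically driven by iproute2, e.g.
 * (illustrative invocation):
 *
 *   tc filter add dev eth0 ingress bpf direct-action obj prog.o
 */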
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	unsigned long idr_index;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
				    1, 0x7FFFFFFF, GFP_KERNEL);
		if (ret)
			goto errout;
		prog->handle = idr_index;
	} else {
		if (!oldprog) {
			ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
					    handle, handle + 1, GFP_KERNEL);
			if (ret)
				goto errout;
		}
		prog->handle = handle;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace_ext(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove_ext(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
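
/* Classifier ops registered with the TC core under the "bpf" kind. */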
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);