// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 * Refer to:
 *		draft-ietf-forces-interfelfb-03
 *		and
 *		netdev01 paper:
 *		"Distributing Linux Traffic Control Classifier-Action
 *		Subsystem"
 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * copyright Jamal Hadi Salim (2015)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>

static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
	[TCA_IFE_TYPE] = { .type = NLA_U16},
};

int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u16 edata = 0;

	if (mi->metaval)
		edata = *(u16 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htons(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+V == 2+2+4 */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u32 edata = metaval;

	if (mi->metaval)
		edata = *(u32 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htonl(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
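
/* Illustrative note (not part of the original file): each metadatum that
 * the ife_encode_meta_*() helpers above hand to ife_tlv_meta_encode() ends
 * up on the wire as a TLV: a 16-bit type, a 16-bit length and the value
 * padded to a 32-bit boundary. That is why ife_check_meta_u32() and
 * ife_check_meta_u16() both report 8 bytes per metadatum:
 *
 *	u32: 2 (T) + 2 (L) + 4 (V)           = 8 bytes
 *	u16: 2 (T) + 2 (L) + 2 (V) + 2 (pad) = 8 bytes
 */
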
void ife_release_meta_gen(struct tcf_meta_info *mi)
{
	kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
	if (len == sizeof(u32))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
	/* length will not include padding */
	if (len == sizeof(u16))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
	struct tcf_meta_ops *o;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		if (o->metaid == metaid) {
			if (!try_module_get(o->owner))
				o = NULL;
			read_unlock(&ife_mod_lock);
			return o;
		}
	}
	read_unlock(&ife_mod_lock);

	return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;

	if (!mops->metaid || !mops->metatype || !mops->name ||
	    !mops->check_presence || !mops->encode || !mops->decode ||
	    !mops->get || !mops->alloc)
		return -EINVAL;

	write_lock(&ife_mod_lock);

	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid ||
		    (strcmp(mops->name, m->name) == 0)) {
			write_unlock(&ife_mod_lock);
			return -EEXIST;
		}
	}

	if (!mops->release)
		mops->release = ife_release_meta_gen;

	list_add_tail(&mops->list, &ifeoplist);
	write_unlock(&ife_mod_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;
	int err = -ENOENT;

	write_lock(&ife_mod_lock);
	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid) {
			list_del(&mops->list);
			err = 0;
			break;
		}
	}
	write_unlock(&ife_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
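
/* Illustrative sketch (not part of the original file): a metadata module
 * registers a struct tcf_meta_ops carrying at least the callbacks that
 * register_ife_op() above insists on, roughly along these lines (the
 * example_* names are hypothetical; the generic helpers are the ones
 * exported earlier in this file):
 *
 *	static struct tcf_meta_ops ife_example_ops = {
 *		.metaid		= IFE_META_SKBMARK,
 *		.metatype	= NLA_U32,
 *		.name		= "skbmark",
 *		.check_presence	= example_check,
 *		.encode		= example_encode,
 *		.decode		= example_decode,
 *		.get		= ife_get_meta_u32,
 *		.alloc		= ife_alloc_meta_u32,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = register_ife_op(&ife_example_ops);
 *
 * .release and .validate may be left NULL: register_ife_op() defaults
 * .release to ife_release_meta_gen(), and ife_validate_metatype() below
 * falls back to the built-in NLA_U16/NLA_U32 validators.
 */
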
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
	int ret = 0;
	/* XXX: unfortunately can't use nla_policy at this point
	 * because a length of 0 is valid in the case of
	 * "allow". "use" semantics do enforce a proper
	 * length and I could have used nla_policy but it makes it hard
	 * to use it just for that..
	 */
	if (ops->validate)
		return ops->validate(val, len);

	if (ops->metatype == NLA_U32)
		ret = ife_validate_meta_u32(val, len);
	else if (ops->metatype == NLA_U16)
		ret = ife_validate_meta_u16(val, len);

	return ret;
}

#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
	switch (metaid) {
	case IFE_META_SKBMARK:
		return "skbmark";
	case IFE_META_PRIO:
		return "skbprio";
	case IFE_META_TCINDEX:
		return "tcindex";
	default:
		return "unknown";
	}
}
#endif

/* called when adding new meta information
 */
static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops) {
		ret = -ENOENT;
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("ife-meta-%s", ife_meta_id2name(metaid));
		if (rtnl_held)
			rtnl_lock();
		ops = find_ife_oplist(metaid);
#endif
	}

	if (ops) {
		ret = 0;
		if (len)
			ret = ife_validate_metatype(ops, val, len);

		module_put(ops->owner);
	}

	return ret;
}

/* called when adding new meta information
 */
static int __add_metainfo(const struct tcf_meta_ops *ops,
			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
			  int len, bool atomic, bool exists)
{
	struct tcf_meta_info *mi = NULL;
	int ret = 0;

	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!mi)
		return -ENOMEM;

	mi->metaid = metaid;
	mi->ops = ops;
	if (len > 0) {
		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (ret != 0) {
			kfree(mi);
			return ret;
		}
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	list_add_tail(&mi->metalist, &ife->metalist);
	if (exists)
		spin_unlock_bh(&ife->tcf_lock);

	return ret;
}

static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
				    struct tcf_ife_info *ife, u32 metaid,
				    bool exists)
{
	int ret;

	if (!try_module_get(ops->owner))
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
	if (ret)
		module_put(ops->owner);
	return ret;
}

static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
			int len, bool exists)
{
	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret;

	if (!ops)
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
	if (ret)
		/* put back what find_ife_oplist took */
		module_put(ops->owner);
	return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
	struct tcf_meta_ops *o;
	int rc = 0;
	int installed = 0;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
		if (rc == 0)
			installed += 1;
	}
	read_unlock(&ife_mod_lock);

	if (installed)
		return 0;
	else
		return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e;
	struct nlattr *nest;
	unsigned char *b = skb_tail_pointer(skb);
	int total_encoded = 0;

	/* can only happen on decode */
	if (list_empty(&ife->metalist))
		return 0;

	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
	if (!nest)
		goto out_nlmsg_trim;

	list_for_each_entry(e, &ife->metalist, metalist) {
		if (!e->ops->get(skb, e))
			total_encoded += 1;
	}

	if (!total_encoded)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	return 0;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_meta_info *e, *n;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		list_del(&e->metalist);
		if (e->metaval) {
			if (e->ops->release)
				e->ops->release(e);
			else
				kfree(e->metaval);
		}
		module_put(e->ops->owner);
		kfree(e);
	}
}

static void tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;

	spin_lock_bh(&ife->tcf_lock);
	_tcf_ife_cleanup(a);
	spin_unlock_bh(&ife->tcf_lock);

	p = rcu_dereference_protected(ife->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}

static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
			     bool exists, bool rtnl_held)
{
	int len = 0;
	int rc = 0;
	int i = 0;
	void *val;

	for (i = 1; i < max_metacnt; i++) {
		if (tb[i]) {
			val = nla_data(tb[i]);
			len = nla_len(tb[i]);

			rc = load_metaops_and_vet(i, val, len, rtnl_held);
			if (rc != 0)
				return rc;

			rc = add_metainfo(ife, i, val, len, exists);
			if (rc)
				return rc;
		}
	}

	return rc;
}
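
/* Illustrative summary (not part of the original file) of the netlink
 * layout tcf_ife_init() below consumes, as implied by ife_policy and the
 * parsing code:
 *
 *	TCA_IFE_PARMS	struct tc_ife (index, action, IFE_ENCODE flag)
 *	TCA_IFE_TYPE	u16 ethertype for the IFE header (default ETH_P_IFE)
 *	TCA_IFE_DMAC	6-byte destination MAC rewritten on encode
 *	TCA_IFE_SMAC	6-byte source MAC rewritten on encode
 *	TCA_IFE_METALST	nested attributes keyed by IFE_META_* id; a
 *			zero-length payload means "allow" (encode the
 *			skb's own value), a sized payload means "use"
 *			that fixed value (see populate_metalist() above)
 *
 * If TCA_IFE_METALST is absent, use_all_metadata() installs every
 * currently registered metadatum instead.
 */
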
static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			int ovr, int bind, bool rtnl_held,
			struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_ife_params *p;
	struct tcf_ife_info *ife;
	u16 ife_type = ETH_P_IFE;
	struct tc_ife *parm;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	bool exists = false;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_IFE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_IFE_PARMS]);

	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
	 * they cannot run at the same time. Reject all other flag values,
	 * which are not supported right now.
	 */
	if (parm->flags & ~IFE_ENCODE)
		return -EINVAL;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0) {
		kfree(p);
		return err;
	}
	exists = err;
	if (exists && bind) {
		kfree(p);
		return 0;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
				     bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			kfree(p);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		kfree(p);
		return -EEXIST;
	}

	ife = to_ife(*a);
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p->flags = parm->flags;

	if (parm->flags & IFE_ENCODE) {
		if (tb[TCA_IFE_TYPE])
			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
	}

	if (parm->flags & IFE_ENCODE) {
		if (daddr)
			ether_addr_copy(p->eth_dst, daddr);
		else
			eth_zero_addr(p->eth_dst);

		if (saddr)
			ether_addr_copy(p->eth_src, saddr);
		else
			eth_zero_addr(p->eth_src);

		p->eth_type = ife_type;
	}

	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);

	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
						  tb[TCA_IFE_METALST], NULL,
						  NULL);
		if (err)
			goto metadata_parse_err;
		err = populate_metalist(ife, tb2, exists, rtnl_held);
		if (err)
			goto metadata_parse_err;

	} else {
		/* If no metadata allow-list (or an allow-all) was passed,
		 * install as many of the supported metadata as we can. At
		 * least one must be installed or we bail out.
		 */
		err = use_all_metadata(ife, exists);
		if (err)
			goto metadata_parse_err;
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	/* protected by tcf_lock when modifying existing action */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(ife->params, p, 1);

	if (exists)
		spin_unlock_bh(&ife->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
metadata_parse_err:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	kfree(p);
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&ife->tcf_lock);
	opt.action = ife->tcf_action;
	p = rcu_dereference_protected(ife->params,
				      lockdep_is_held(&ife->tcf_lock));
	opt.flags = p->flags;

	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;

	if (!is_zero_ether_addr(p->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
			goto nla_put_failure;
	}

	if (!is_zero_ether_addr(p->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
			goto nla_put_failure;
	}

	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
		goto nla_put_failure;

	if (dump_metalist(skb, ife)) {
		/* ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
	}

	spin_unlock_bh(&ife->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ife->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
			      u16 metaid, u16 mlen, void *mdata)
{
	struct tcf_meta_info *e;

	/* XXX: use hash to speed up */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (metaid == e->metaid) {
			if (e->ops) {
				/* We check for decode presence already */
				return e->ops->decode(skb, mdata, mlen);
			}
		}
	}

	return -ENOENT;
}

static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	u8 *ifehdr_end;
	u8 *tlv_data;
	u16 metalen;

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		u8 *curr_data;
		u16 mtype;
		u16 dlen;

		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
						&dlen, NULL);
		if (!curr_data) {
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}

		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but don't have an ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);

	return action;
}

/* XXX: check if we can do this at install time instead of on the current
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e, *n;
	int tot_run_sz = 0, run_sz = 0;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		if (e->ops->check_presence) {
			run_sz = e->ops->check_presence(skb, e);
			tot_run_sz += run_sz;
		}
	}

	return tot_run_sz;
}
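
/* Illustrative example (not part of the original file): the encode path
 * below is typically set up from userspace with iproute2, roughly like
 * this (exact syntax depends on the tc version; device, classifier and
 * addresses are placeholders):
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match u32 0 0 flowid 1:1 \
 *		action ife encode allow mark use prio 10 \
 *		dst 02:15:15:15:15:15 pipe
 *
 * "allow mark" installs the skbmark metadatum with no fixed value (the
 * skb's own mark is encoded), while "use prio 10" installs a fixed value;
 * both arrive via TCA_IFE_METALST and are handled by populate_metalist().
 */
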
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res, struct tcf_ife_params *p)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh;	/* outer ether header */
	struct tcf_meta_info *e;
	/*
	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	   where ORIGDATA = original ethernet header ...
	 */
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	void *ife_meta;
	int err = 0;

	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
			exceed_mtu = true;
	}

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow packet
		 * with no metadata
		 */
		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return action;
	}
	/* could be stupid policy setup or mtu config
	 * so let's be conservative..
	 */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	ife_meta = ife_encode(skb, metalen);

	spin_lock(&ife->tcf_lock);

	/* XXX: we don't have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->check_presence...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
					     e);
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			spin_unlock(&ife->tcf_lock);
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}
		skboff += err;
	}
	spin_unlock(&ife->tcf_lock);
	oethh = (struct ethhdr *)skb->data;

	if (!is_zero_ether_addr(p->eth_src))
		ether_addr_copy(oethh->h_source, p->eth_src);
	if (!is_zero_ether_addr(p->eth_dst))
		ether_addr_copy(oethh->h_dest, p->eth_dst);
	oethh->h_proto = htons(p->eth_type);

	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);

	return action;
}

static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	int ret;

	p = rcu_dereference_bh(ife->params);
	if (p->flags & IFE_ENCODE) {
		ret = tcf_ife_encode(skb, a, res, p);
		return ret;
	}

	return tcf_ife_decode(skb, a, res);
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ife_ops = {
	.kind = "ife",
	.id = TCA_ID_IFE,
	.owner = THIS_MODULE,
	.act = tcf_ife_act,
	.dump = tcf_ife_dump,
	.cleanup = tcf_ife_cleanup,
	.init = tcf_ife_init,
	.walk = tcf_ife_walker,
	.lookup = tcf_ife_search,
	.size =	sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tc_action_net_init(net, tn, &act_ife_ops);
}

static void __net_exit ife_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ife_net_id);
}

static struct pernet_operations ife_net_ops = {
	.init = ife_init_net,
	.exit_batch = ife_exit_net,
	.id   = &ife_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
	return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");