// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 * Refer to:
 *		draft-ietf-forces-interfelfb-03
 *	and
 *	netdev01 paper:
 *	"Distributing Linux Traffic Control Classifier-Action
 *	Subsystem"
 *	Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * copyright Jamal Hadi Salim (2015)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>

static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
        [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
        [TCA_IFE_DMAC] = { .len = ETH_ALEN},
        [TCA_IFE_SMAC] = { .len = ETH_ALEN},
        [TCA_IFE_TYPE] = { .type = NLA_U16},
};

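/* Generic metadata encode/dump/alloc/validate helpers follow. They are
 * exported so that the individual IFE metadata modules (e.g. the skbmark,
 * skbprio and tcindex metadata listed in ife_meta_id2name() below) can plug
 * them into their struct tcf_meta_ops instead of reimplementing the common
 * u16/u32 handling.
 */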
int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u16 edata = 0;

        if (mi->metaval)
                edata = *(u16 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htons(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+V == 2+2+4 */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u32 edata = metaval;

        if (mi->metaval)
                edata = *(u32 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htonl(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
        kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
        if (len == sizeof(u32))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
        /* length will not include padding */
        if (len == sizeof(u16))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

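/* Registry of metadata ops. Metadata modules add themselves to ifeoplist
 * via register_ife_op() and are looked up by metaid. find_ife_oplist()
 * takes a reference on the owning module on success; the reference is
 * dropped with module_put() once the ops are no longer needed (after
 * vetting, or when the per-action metadata entry is released).
 */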
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
        struct tcf_meta_ops *o;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
                        read_unlock(&ife_mod_lock);
                        return o;
                }
        }
        read_unlock(&ife_mod_lock);

        return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;

        if (!mops->metaid || !mops->metatype || !mops->name ||
            !mops->check_presence || !mops->encode || !mops->decode ||
            !mops->get || !mops->alloc)
                return -EINVAL;

        write_lock(&ife_mod_lock);

        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
                        write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }

        if (!mops->release)
                mops->release = ife_release_meta_gen;

        list_add_tail(&mops->list, &ifeoplist);
        write_unlock(&ife_mod_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;
        int err = -ENOENT;

        write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
                        err = 0;
                        break;
                }
        }
        write_unlock(&ife_mod_lock);

        return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);

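/* Illustrative sketch only (never compiled): how a metadata module might use
 * the generic helpers above and plug into this registry. The function names
 * and the skb->mark based callbacks below are hypothetical and only mirror
 * the callback signatures invoked elsewhere in this file; the in-tree
 * metadata modules are the authoritative reference.
 */
#if 0
static int example_skbmark_check(struct sk_buff *skb, struct tcf_meta_info *e)
{
        /* report 8 bytes (T+L+V) if there is a mark worth carrying */
        return ife_check_meta_u32(skb->mark, e);
}

static int example_skbmark_encode(struct sk_buff *skb, void *skbdata,
                                  struct tcf_meta_info *e)
{
        return ife_encode_meta_u32(skb->mark, skbdata, e);
}

static int example_skbmark_decode(struct sk_buff *skb, void *data, u16 len)
{
        skb->mark = ntohl(*(u32 *)data); /* value is carried big endian */
        return 0;
}

static struct tcf_meta_ops example_skbmark_ops = {
        .metaid = IFE_META_SKBMARK,
        .metatype = NLA_U32,
        .name = "skbmark",
        .check_presence = example_skbmark_check,
        .encode = example_skbmark_encode,
        .decode = example_skbmark_decode,
        .get = ife_get_meta_u32,
        .alloc = ife_alloc_meta_u32,
        .release = ife_release_meta_gen,
        .validate = ife_validate_meta_u32,
        .owner = THIS_MODULE,
};

/* such a module would call register_ife_op(&example_skbmark_ops) from its
 * init path and unregister_ife_op() on exit
 */
#endif
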
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
        int ret = 0;
        /* XXX: unfortunately we can't use nla_policy at this point
         * because a length of 0 is valid in the case of
         * "allow". "use" semantics do enforce a proper
         * length, and nla_policy could have been used, but it is hard
         * to use it just for that..
         */
        if (ops->validate)
                return ops->validate(val, len);

        if (ops->metatype == NLA_U32)
                ret = ife_validate_meta_u32(val, len);
        else if (ops->metatype == NLA_U16)
                ret = ife_validate_meta_u16(val, len);

        return ret;
}

#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
        switch (metaid) {
        case IFE_META_SKBMARK:
                return "skbmark";
        case IFE_META_PRIO:
                return "skbprio";
        case IFE_META_TCINDEX:
                return "tcindex";
        default:
                return "unknown";
        }
}
#endif

/* called when adding new meta information
 */
static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops) {
                ret = -ENOENT;
#ifdef CONFIG_MODULES
                if (rtnl_held)
                        rtnl_unlock();
                request_module("ife-meta-%s", ife_meta_id2name(metaid));
                if (rtnl_held)
                        rtnl_lock();
                ops = find_ife_oplist(metaid);
#endif
        }

        if (ops) {
                ret = 0;
                if (len)
                        ret = ife_validate_metatype(ops, val, len);

                module_put(ops->owner);
        }

        return ret;
}

/* called when adding new meta information
 */
static int __add_metainfo(const struct tcf_meta_ops *ops,
                          struct tcf_ife_info *ife, u32 metaid, void *metaval,
                          int len, bool atomic, bool exists)
{
        struct tcf_meta_info *mi = NULL;
        int ret = 0;

        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi)
                return -ENOMEM;

        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        return ret;
                }
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        list_add_tail(&mi->metalist, &ife->metalist);
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);

        return ret;
}

static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
                                    struct tcf_ife_info *ife, u32 metaid,
                                    bool exists)
{
        int ret;

        if (!try_module_get(ops->owner))
                return -ENOENT;
        ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
        if (ret)
                module_put(ops->owner);
        return ret;
}

static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool exists)
{
        const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret;

        if (!ops)
                return -ENOENT;
        ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
        if (ret)
                /* put back what find_ife_oplist took */
                module_put(ops->owner);
        return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
        read_unlock(&ife_mod_lock);

        if (installed)
                return 0;
        else
                return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e;
        struct nlattr *nest;
        unsigned char *b = skb_tail_pointer(skb);
        int total_encoded = 0;

        /* can only happen on decode */
        if (list_empty(&ife->metalist))
                return 0;

        nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
        if (!nest)
                goto out_nlmsg_trim;

        list_for_each_entry(e, &ife->metalist, metalist) {
                if (!e->ops->get(skb, e))
                        total_encoded += 1;
        }

        if (!total_encoded)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        return 0;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_meta_info *e, *n;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
                                e->ops->release(e);
                        else
                                kfree(e->metaval);
                }
                module_put(e->ops->owner);
                kfree(e);
        }
}

static void tcf_ife_cleanup(struct tc_action *a)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;

        spin_lock_bh(&ife->tcf_lock);
        _tcf_ife_cleanup(a);
        spin_unlock_bh(&ife->tcf_lock);

        p = rcu_dereference_protected(ife->params, 1);
        if (p)
                kfree_rcu(p, rcu);
}

static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists, bool rtnl_held)
{
        int len = 0;
        int rc = 0;
        int i = 0;
        void *val;

        for (i = 1; i < max_metacnt; i++) {
                if (tb[i]) {
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);

                        rc = load_metaops_and_vet(i, val, len, rtnl_held);
                        if (rc != 0)
                                return rc;

                        rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
        }

        return rc;
}

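/* Control path: tcf_ife_init() parses the netlink attributes (TCA_IFE_PARMS
 * for flags/action, optional TCA_IFE_TYPE/DMAC/SMAC for the outer header in
 * encode mode, and an optional nested TCA_IFE_METALST selecting which
 * metadata to carry). Without a metadata list, every currently registered
 * metadatum is enabled via use_all_metadata().
 *
 * Roughly, from iproute2 (illustrative syntax only, see tc-ife(8)):
 *
 *	tc filter add dev eth0 parent ffff: protocol ip matchall \
 *		action ife encode allow mark dst 02:15:15:15:15:15
 *	tc filter add dev eth1 parent ffff: protocol all matchall \
 *		action ife decode reclassify
 */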
static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
                        struct tcf_proto *tp, u32 flags,
                        struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tcf_ife_params *p;
        struct tcf_ife_info *ife;
        u16 ife_type = ETH_P_IFE;
        struct tc_ife *parm;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
        bool exists = false;
        int ret = 0;
        u32 index;
        int err;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (!tb[TCA_IFE_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_IFE_PARMS]);

        /* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
         * they cannot run at the same time. Check on all other values which
         * are not supported right now.
         */
        if (parm->flags & ~IFE_ENCODE)
                return -EINVAL;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0) {
                kfree(p);
                return err;
        }
        exists = err;
        if (exists && bind) {
                kfree(p);
                return 0;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
                                     bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        kfree(p);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!ovr) {
                tcf_idr_release(*a, bind);
                kfree(p);
                return -EEXIST;
        }

        ife = to_ife(*a);
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        p->flags = parm->flags;

        if (parm->flags & IFE_ENCODE) {
                if (tb[TCA_IFE_TYPE])
                        ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
                if (tb[TCA_IFE_DMAC])
                        daddr = nla_data(tb[TCA_IFE_DMAC]);
                if (tb[TCA_IFE_SMAC])
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }

        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
                else
                        eth_zero_addr(p->eth_dst);

                if (saddr)
                        ether_addr_copy(p->eth_src, saddr);
                else
                        eth_zero_addr(p->eth_src);

                p->eth_type = ife_type;
        }

        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);

        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
                                                  tb[TCA_IFE_METALST], NULL,
                                                  NULL);
                if (err)
                        goto metadata_parse_err;
                err = populate_metalist(ife, tb2, exists, rtnl_held);
                if (err)
                        goto metadata_parse_err;

        } else {
                /* If no metadata allow list was passed, or allow-all was
                 * passed, then we process by adding as many supported
                 * metadata as we can. There had better be at least one,
                 * else we are going to bail out.
                 */
                err = use_all_metadata(ife, exists);
                if (err)
                        goto metadata_parse_err;
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        /* protected by tcf_lock when modifying existing action */
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p = rcu_replace_pointer(ife->params, p, 1);

        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);

        return ret;
metadata_parse_err:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        kfree(p);
        tcf_idr_release(*a, bind);
        return err;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                        int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;
        struct tc_ife opt = {
                .index = ife->tcf_index,
                .refcnt = refcount_read(&ife->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&ife->tcf_lock);
        opt.action = ife->tcf_action;
        p = rcu_dereference_protected(ife->params,
                                      lockdep_is_held(&ife->tcf_lock));
        opt.flags = p->flags;

        if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &ife->tcf_tm);
        if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
                goto nla_put_failure;

        if (!is_zero_ether_addr(p->eth_dst)) {
                if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
                        goto nla_put_failure;
        }

        if (!is_zero_ether_addr(p->eth_src)) {
                if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
                        goto nla_put_failure;
        }

        if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
                goto nla_put_failure;

        if (dump_metalist(skb, ife)) {
                /* ignore failure to dump metalist */
                pr_info("Failed to dump metalist\n");
        }

        spin_unlock_bh(&ife->tcf_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&ife->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

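/* Decode datapath: the packet arrives carrying an IFE header. ife_decode()
 * pops that header and returns the metadata TLV region; each TLV is then
 * handed to the matching ops->decode() via find_decode_metaid(). Metadata
 * for which no ops are installed is only counted (overlimits) and otherwise
 * skipped.
 */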
static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
                              u16 metaid, u16 mlen, void *mdata)
{
        struct tcf_meta_info *e;

        /* XXX: use hash to speed up */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (metaid == e->metaid) {
                        if (e->ops) {
                                /* We check for decode presence already */
                                return e->ops->decode(skb, mdata, mlen);
                        }
                }
        }

        return -ENOENT;
}

static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        u8 *ifehdr_end;
        u8 *tlv_data;
        u16 metalen;

        bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        tlv_data = ife_decode(skb, &metalen);
        if (unlikely(!tlv_data)) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        ifehdr_end = tlv_data + metalen;
        for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
                u8 *curr_data;
                u16 mtype;
                u16 dlen;

                curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
                                                &dlen, NULL);
                if (!curr_data) {
                        qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                        return TC_ACT_SHOT;
                }

                if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
                        /* abuse overlimits to count when we receive metadata
                         * but don't have an ops for it
                         */
                        pr_info_ratelimited("Unknown metaid %d dlen %d\n",
                                            mtype, dlen);
                        qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
                }
        }

        if (WARN_ON(tlv_data != ifehdr_end)) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);
        skb_reset_network_header(skb);

        return action;
}

/* XXX: check if we can do this at install time instead of current
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e, *n;
        int tot_run_sz = 0, run_sz = 0;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                if (e->ops->check_presence) {
                        run_sz = e->ops->check_presence(skb, e);
                        tot_run_sz += run_sz;
                }
        }

        return tot_run_sz;
}

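/* Encode datapath: ife_get_sz() asks every installed metadatum how much TLV
 * space it needs for this skb. tcf_ife_encode() checks that the extra header
 * room would not push the packet past the device MTU on egress, makes room
 * via ife_encode(), lets each ops->encode() fill in its TLV, and then
 * rewrites the outer ethernet header with the configured source and
 * destination MAC addresses and ethertype.
 */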
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res, struct tcf_ife_params *p)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ethhdr *oethh;   /* outer ether header */
        struct tcf_meta_info *e;
        /* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
         * where ORIGDATA = original ethernet header ...
         */
        u16 metalen = ife_get_sz(skb, ife);
        int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
        unsigned int skboff = 0;
        int new_len = skb->len + hdrm;
        bool exceed_mtu = false;
        void *ife_meta;
        int err = 0;

        if (!skb_at_tc_ingress(skb)) {
                if (new_len > skb->dev->mtu)
                        exceed_mtu = true;
        }

        bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (!metalen) {         /* no metadata to send */
                /* abuse overlimits to count when we allow packet
                 * with no metadata
                 */
                qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return action;
        }
        /* could be stupid policy setup or mtu config
         * so let's be conservative..
         */
        if ((action == TC_ACT_SHOT) || exceed_mtu) {
                qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                return TC_ACT_SHOT;
        }

        if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);

        ife_meta = ife_encode(skb, metalen);

        spin_lock(&ife->tcf_lock);

        /* XXX: we don't have a clever way of telling encode to
         * not repeat some of the computations that are done by
         * ops->check_presence...
         */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (e->ops->encode) {
                        err = e->ops->encode(skb, (void *)(ife_meta + skboff),
                                             e);
                }
                if (err < 0) {
                        /* too corrupt to keep around if overwritten */
                        spin_unlock(&ife->tcf_lock);
                        qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                        return TC_ACT_SHOT;
                }
                skboff += err;
        }
        spin_unlock(&ife->tcf_lock);
        oethh = (struct ethhdr *)skb->data;

        if (!is_zero_ether_addr(p->eth_src))
                ether_addr_copy(oethh->h_source, p->eth_src);
        if (!is_zero_ether_addr(p->eth_dst))
                ether_addr_copy(oethh->h_dest, p->eth_dst);
        oethh->h_proto = htons(p->eth_type);

        if (skb_at_tc_ingress(skb))
                skb_pull(skb, skb->dev->hard_header_len);

        return action;
}

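/* tcf_ife_act() is the per-packet entry point invoked by the tc action
 * framework; it dispatches to encode or decode based on the IFE_ENCODE flag
 * in the RCU-protected parameters.
 */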
static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_ife_params *p;
        int ret;

        p = rcu_dereference_bh(ife->params);
        if (p->flags & IFE_ENCODE) {
                ret = tcf_ife_encode(skb, a, res, p);
                return ret;
        }

        return tcf_ife_decode(skb, a, res);
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .id = TCA_ID_IFE,
        .owner = THIS_MODULE,
        .act = tcf_ife_act,
        .dump = tcf_ife_dump,
        .cleanup = tcf_ife_cleanup,
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
        .size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tc_action_net_init(net, tn, &act_ife_ops);
}

static void __net_exit ife_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, ife_net_id);
}

static struct pernet_operations ife_net_ops = {
        .init = ife_init_net,
        .exit_batch = ife_exit_net,
        .id   = &ife_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
        return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
        tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");