/* SPDX-License-Identifier: GPL-2.0 */
/*
 * nftables hardware offload: translates nft rules on NFPROTO_NETDEV base
 * chains into flow_offload (TC_SETUP_CLSFLOWER / TC_SETUP_BLOCK) requests
 * and drives bind/unbind of flow blocks on the underlying net_devices.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

/*
 * Allocate a nft_flow_rule with room for @num_actions flow actions and
 * wire the flow_rule's match pointers at the dissector/mask/key embedded
 * in flow->match, so drivers see one contiguous match description.
 * Returns NULL on allocation failure (the partially built object is freed).
 */
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector = &flow->match.dissector;
	flow->rule->match.mask = &flow->match.mask;
	flow->rule->match.key = &flow->match.key;

	return flow;
}

/*
 * Set the CONTROL dissector key's addr_type (e.g. IPv4/IPv6 addresses)
 * exactly once per flow rule: if FLOW_DISSECTOR_KEY_CONTROL is already
 * present in used_keys, the first setting wins and this is a no-op.
 */
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	/* Only the first caller may install the control key. */
	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff; /* exact match on addr_type */
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

/* Temporary holder for a (value, mask) ethertype pair during VLAN fixup. */
struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};

/*
 * nftables expressions record the EtherType they matched on in
 * basic.n_proto, but the flow dissector expects VLAN-tagged traffic to
 * carry the outer TPID in the vlan/cvlan keys and the innermost protocol
 * in basic.n_proto. Rotate the captured (value, mask) pairs accordingly:
 * - double-tagged (vlan key present with 802.1Q/802.1AD tpid): shift
 *   basic <- cvlan <- vlan <- saved basic, and enable the CVLAN key;
 * - single-tagged (basic.n_proto itself is 802.1Q/802.1AD): shift
 *   basic <- vlan <- saved basic, and enable the VLAN key.
 * Untagged matches are left untouched.
 */
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	/* Save the original ethertype before it is overwritten below. */
	struct nft_offload_ethertype ethertype = {
		.value	= match->key.basic.n_proto,
		.mask	= match->mask.basic.n_proto,
	};

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		/* QinQ: basic <- cvlan <- vlan <- saved ethertype. */
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		/* Single tag: basic <- vlan <- saved ethertype. */
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
	}
}

/*
 * Build a hardware flow rule from an nft rule.
 *
 * Pass 1 counts expressions that contribute an offload action (those whose
 * ops report offload_action true) to size the action array; a rule with no
 * offloadable action is rejected with -EOPNOTSUPP. Pass 2 runs each
 * expression's ->offload() callback to fill in match keys and actions,
 * failing with -EOPNOTSUPP as soon as one expression lacks an ->offload
 * implementation. Finally the VLAN key fixup runs and the L3 protocol
 * dependency collected in @ctx becomes the rule's protocol.
 *
 * Returns the new nft_flow_rule or an ERR_PTR; on error everything
 * allocated here (including device references taken by ->offload
 * callbacks) is released via nft_flow_rule_destroy().
 */
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_action &&
		    expr->ops->offload_action(expr))
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;

err_out:
	/* kfree(NULL) is a no-op, so this is safe before ctx is set. */
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

/*
 * Release a flow rule: drop the net_device references held by redirect
 * and mirred actions, then free the flow_rule and the wrapper itself.
 */
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			/* These actions hold a reference on entry->dev. */
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

/*
 * Record which dependency (network- or transport-layer protocol) the
 * next nft_offload_update_dependency() call will fill in.
 */
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

/*
 * Consume a protocol value for the previously announced dependency:
 * a __u16 l3num for NFT_OFFLOAD_DEP_NETWORK or a __u8 protonum for
 * NFT_OFFLOAD_DEP_TRANSPORT (size mismatches trigger a WARN_ON).
 * The dependency type is reset to UNSPEC afterwards in all cases.
 */
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

/* Fill the common header shared by all flower classifier offload calls. */
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

/*
 * Invoke every registered flow block callback on @cb_list with the given
 * setup request; stop and return the first negative error, else 0.
 */
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

/*
 * Validate that the base chain's priority fits the offload API:
 * it must be positive and no larger than USHRT_MAX (flow_cls_common_offload
 * carries it in a 16-bit-range field). Returns 0 if usable, -1 otherwise.
 */
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

/*
 * Prepare a flow_cls_offload request for @rule on @basechain. The rule
 * pointer doubles as the driver-visible cookie. @flow may be NULL (e.g.
 * for DESTROY/STATS commands), in which case the protocol defaults to
 * ETH_P_ALL and no match/action payload is attached.
 */
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

/*
 * Build a flower offload request for @rule and submit it to all callbacks
 * bound to the chain's flow block. Only base chains can be offloaded;
 * anything else yields -EOPNOTSUPP. @cls_flow is caller-provided so that
 * nft_flow_rule_stats() can read back the stats the drivers filled in.
 */
static int nft_flow_offload_cmd(const struct nft_chain *chain,
				const struct nft_rule *rule,
				struct nft_flow_rule *flow,
				enum flow_cls_command command,
				struct flow_cls_offload *cls_flow)
{
	struct netlink_ext_ack extack = {};
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
				 &basechain->flow_block.cb_list);
}

/* Convenience wrapper when the caller does not need the reply payload. */
static int nft_flow_offload_rule(const struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow;

	return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}

/*
 * Query hardware counters for @rule via FLOW_CLS_STATS and feed the
 * returned stats into every expression that implements ->offload_stats
 * (e.g. counters), so the software-visible counters stay in sync.
 */
int nft_flow_rule_stats(const struct nft_chain *chain,
			const struct nft_rule *rule)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_expr *expr, *next;
	int err;

	err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
				   &cls_flow);
	if (err < 0)
		return err;

	nft_rule_for_each_expr(expr, next, rule) {
		if (expr->ops->offload_stats)
			expr->ops->offload_stats(expr, &cls_flow.stats);
	}

	return 0;
}

/*
 * Bind: adopt the driver callbacks collected in @bo into the base chain's
 * flow block, so future rule offload requests reach them.
 */
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

/*
 * Unbind: ask the departing callbacks to destroy every rule of the chain
 * they may still hold, then unlink and free each flow_block_cb.
 */
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		/* Fresh extack per rule; errors here are best-effort. */
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

/* Dispatch a flow block bind/unbind result onto the base chain. */
static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/*
 * Initialize a flow_block_offload request targeting the chain's flow
 * block on the ingress binder type; cb_list starts empty and is filled
 * by the driver during setup.
 */
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net = net;
	bo->block = &basechain->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	bo->cb_list_head = &basechain->flow_block.cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

/*
 * Direct offload path: the device implements ndo_setup_tc, so hand it a
 * TC_SETUP_BLOCK request and, on success, commit the resulting callbacks
 * via nft_block_setup().
 */
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

/*
 * Cleanup callback invoked by the indirect flow block infrastructure when
 * the driver goes away: replays an UNBIND for the callback under the
 * nftables commit_mutex, so the chain's offload state stays consistent.
 */
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}

/*
 * Indirect offload path: the device has no ndo_setup_tc (e.g. tunnel
 * devices), so go through flow_indr_dev_setup_offload(). An empty cb_list
 * afterwards means no driver claimed the block -> -EOPNOTSUPP.
 */
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

/* Pick the direct or indirect block offload path depending on the device. */
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

/*
 * Apply @cmd to the chain's hook devices. With @this_dev set, only that
 * device is touched and a BIND failure is returned as-is. With @this_dev
 * NULL, all hook devices are processed and a BIND failure triggers a
 * rollback: the first @i devices that were already bound get unbound again.
 */
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	/* Undo the binds that already succeeded (the first i hooks). */
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

/*
 * Bind or unbind offload for an entire chain. @ppolicy, when non-NULL,
 * overrides the chain's stored policy (used during commit, where the
 * transaction carries the pending policy). Only base chains with an
 * accept default policy can be bound for now.
 */
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

/*
 * Roll back hardware state for the transactions that committed before
 * @trans failed: walk the commit list backwards from the failure point and
 * apply each operation's inverse (NEWCHAIN -> unbind, DELCHAIN -> rebind,
 * NEWRULE -> destroy, DELRULE -> re-replace). A failure during abort is
 * unrecoverable, hence the WARN_ON_ONCE.
 */
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

/*
 * Push the pending nftables transaction batch to hardware. Only
 * NFPROTO_NETDEV transactions on NFT_CHAIN_HW_OFFLOAD chains are handled:
 * chain add/delete maps to flow block bind/unbind (using the transaction's
 * policy), rule add maps to FLOW_CLS_REPLACE (only appends are supported,
 * since hardware cannot honor an arbitrary insert position) and rule delete
 * maps to FLOW_CLS_DESTROY. On the first error the already-applied part of
 * the batch is rolled back via nft_flow_rule_offload_abort().
 *
 * NOTE(review): assumes the caller serializes against the commit list
 * (commit_mutex held) — not visible from this file alone.
 */
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Hardware only supports appending rules. */
			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	return err;
}

/*
 * Find the first hardware-offloaded netdev base chain in @nft_net that
 * hooks onto @dev, or NULL if none. Used from the netdev notifier to
 * locate chains affected by a device unregister.
 */
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
						 struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &nft_net->tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

/*
 * Netdev notifier: when a device with an offloaded chain hook goes away
 * (NETDEV_UNREGISTER), unbind the flow block for that device only,
 * under the per-netns nftables commit_mutex.
 */
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	chain = __nft_offload_get_chain(nft_net, dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&nft_net->commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

/* Register the netdev notifier at module init. */
int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

/* Unregister the netdev notifier at module exit. */
void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}