/* SPDX-License-Identifier: GPL-2.0 */

/*
 * nf_tables hardware offload: translates nf_tables rules on NFPROTO_NETDEV
 * base chains into flow_offload (TC_SETUP_CLSFLOWER / TC_SETUP_BLOCK)
 * requests and drives them through driver flow-block callbacks.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

/*
 * Allocate a nft_flow_rule with room for @num_actions flow actions and
 * point the embedded flow_rule's match at the nft_flow_rule's own
 * dissector/mask/key storage. Returns NULL on allocation failure.
 */
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	/* The flow_rule match views live inside this nft_flow_rule. */
	flow->rule->match.dissector = &flow->match.dissector;
	flow->rule->match.mask = &flow->match.mask;
	flow->rule->match.key = &flow->match.key;

	return flow;
}

/*
 * Set the FLOW_DISSECTOR_KEY_CONTROL addr_type (fully masked) on @flow,
 * unless a control key has already been set up for this match.
 */
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	/* Only the first caller wins; don't overwrite an existing key. */
	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff;
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

/* Scratch pair used to swap an ethertype value/mask between match keys. */
struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};

/*
 * Rearrange ethertype fields between the basic, vlan and cvlan keys so the
 * match follows flow dissector conventions for (double-)tagged traffic:
 *
 *  - If a VLAN key with an 802.1Q/802.1AD tpid is present, shift one level
 *    down: basic.n_proto takes the inner (cvlan) tpid, cvlan takes the vlan
 *    tpid, vlan takes the original n_proto, and the CVLAN key is enabled.
 *  - Else, if basic.n_proto itself is a VLAN ethertype, move it into the
 *    VLAN key and enable VLAN matching.
 *
 * @ctx is currently unused here.
 */
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_offload_ethertype ethertype = {
		.value = match->key.basic.n_proto,
		.mask = match->mask.basic.n_proto,
	};

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
	}
}

/*
 * Build a flow_offload representation of an nft_rule by running each
 * expression's ->offload() callback.
 *
 * First pass counts expressions flagged NFT_OFFLOAD_F_ACTION to size the
 * action array; a rule with no offloadable action is rejected. The second
 * pass fills in match and actions via the per-expression callbacks, using a
 * temporary nft_offload_ctx to track protocol dependencies.
 *
 * Returns the new flow rule, or ERR_PTR(-EOPNOTSUPP / -ENOMEM / callback
 * error). On error any partially built flow rule is destroyed.
 */
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (nft_expr_more(rule, expr)) {
		/* Every expression in the rule must support offload. */
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);	/* kfree(NULL) is a no-op on the alloc-failure path */
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

/*
 * Release a flow rule built by nft_flow_rule_create(): drop the device
 * references held by redirect/mirred actions, then free the action array
 * and the container itself.
 */
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

/* Record which kind of protocol dependency the next expression will supply. */
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

/*
 * Consume a pending protocol dependency: copy the L3 (16-bit) or L4 (8-bit)
 * protocol number from the match data into @ctx, then clear the pending
 * dependency type.
 */
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

/* Fill the common part of a classifier offload request. */
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

/*
 * Invoke every flow-block callback on @cb_list with the given request.
 * Stops and returns the first negative error; 0 if all callbacks accept.
 */
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

/*
 * Validate that the base chain's hook priority is usable as an offload
 * priority, i.e. within 1..USHRT_MAX. Returns 0 if valid, -1 otherwise.
 */
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

/*
 * Prepare a flow_cls_offload request for @rule. The rule pointer doubles as
 * the driver-visible cookie. @flow may be NULL (e.g. DESTROY/STATS), in
 * which case the protocol defaults to ETH_P_ALL and no match/actions are
 * attached.
 */
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

/*
 * Send a classifier command for @rule to all drivers bound to the chain's
 * flow block. Only base chains can be offloaded. @cls_flow is caller-owned
 * storage so callers (e.g. stats) can read back results.
 */
static int nft_flow_offload_cmd(const struct nft_chain *chain,
				const struct nft_rule *rule,
				struct nft_flow_rule *flow,
				enum flow_cls_command command,
				struct flow_cls_offload *cls_flow)
{
	struct netlink_ext_ack extack = {};
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
				 &basechain->flow_block.cb_list);
}

/* Convenience wrapper: issue a rule command, discarding the request state. */
static int nft_flow_offload_rule(const struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow;

	return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}

/*
 * Query hardware counters for @rule (FLOW_CLS_STATS) and feed the returned
 * stats to every expression that implements ->offload_stats().
 */
int nft_flow_rule_stats(const struct nft_chain *chain,
			const struct nft_rule *rule)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_expr *expr, *next;
	int err;

	err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
				   &cls_flow);
	if (err < 0)
		return err;

	nft_rule_for_each_expr(expr, next, rule) {
		if (expr->ops->offload_stats)
			expr->ops->offload_stats(expr, &cls_flow.stats);
	}

	return 0;
}

/* Adopt the driver callbacks collected in @bo into the chain's flow block. */
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

/*
 * Unbind the driver callbacks in @bo from @basechain: first replay a
 * FLOW_CLS_DESTROY for every rule on the chain so the departing drivers
 * flush their state, then delete and free each callback.
 */
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

/* Dispatch a block bind/unbind after the driver has filled @bo->cb_list. */
static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Initialize a flow_block_offload request for this chain's ingress block. */
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net = net;
	bo->block = &basechain->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

/*
 * Bind/unbind the chain's flow block on a device that implements
 * ndo_setup_tc() directly, then complete the setup with the callbacks the
 * driver registered.
 */
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

/*
 * Cleanup callback for indirect flow blocks, invoked when the underlying
 * driver goes away: unbind the chain's rules and free the callback, under
 * the per-netns nf_tables commit mutex.
 */
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}

/*
 * Bind/unbind via the indirect flow-block infrastructure, for devices
 * without ndo_setup_tc() (e.g. represented through an indirect driver).
 * Fails with -EOPNOTSUPP if no driver registered a callback.
 */
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

/* Pick the direct or indirect block setup path depending on the device. */
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

/*
 * Apply @cmd to the chain's hook devices. If @this_dev is non-NULL only
 * that device is touched; otherwise all hooks are processed. If a bind
 * fails partway through an all-device walk, the devices already bound
 * (counted in @i) are unbound again before returning the error.
 */
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	/* Roll back: unbind the first @i hooks that were already bound. */
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

/*
 * Bind or unbind offload for a whole chain. @ppolicy optionally overrides
 * the chain's stored default policy (used during commit, where the policy
 * comes from the transaction).
 */
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

/*
 * Undo the hardware side of a partially committed transaction: walk the
 * commit list backwards from the failing entry and apply the inverse
 * operation of each already-offloaded change (new chain -> unbind,
 * deleted chain -> rebind, new rule -> destroy, deleted rule -> replace).
 */
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

/*
 * Push the pending transaction list to hardware for NFPROTO_NETDEV chains
 * flagged NFT_CHAIN_HW_OFFLOAD. New rules may only be appended
 * (NLM_F_APPEND without NLM_F_REPLACE). On the first failure, already
 * applied entries are rolled back via nft_flow_rule_offload_abort().
 */
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Offload only supports appending rules. */
			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	return err;
}

/*
 * Find the first netdev-family base chain with NFT_CHAIN_HW_OFFLOAD that
 * has a hook on @dev, or NULL. NOTE(review): the only visible caller holds
 * nft_net->commit_mutex around this walk — presumably required to keep the
 * table/chain/hook lists stable; confirm for any new caller.
 */
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
						 struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &nft_net->tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

/*
 * Netdevice notifier: on NETDEV_UNREGISTER, unbind the flow block of any
 * offloaded chain hooked on the departing device.
 */
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	chain = __nft_offload_get_chain(nft_net, dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&nft_net->commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

/* Register the netdevice notifier; called from nf_tables module init. */
int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

/* Unregister the netdevice notifier; called from nf_tables module exit. */
void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}