/* SPDX-License-Identifier: GPL-2.0 */

/*
 * nf_tables hardware offload glue.
 *
 * Translates nf_tables base chains and rules of the NFPROTO_NETDEV
 * family into flow_block / flow_cls_offload requests, delivered to
 * drivers either via ndo_setup_tc or via the indirect flow block
 * callback machinery (for devices without ndo_setup_tc, e.g. tunnels).
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

/*
 * Allocate a nft_flow_rule wrapping a flow_rule with room for
 * @num_actions action entries.  The flow_rule's match pointers are
 * wired to the dissector/mask/key embedded in the nft_flow_rule, so
 * expression ->offload() callbacks can fill the match in place.
 *
 * Returns NULL on allocation failure; caller owns the result and
 * releases it with nft_flow_rule_destroy().
 */
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	/* Point the generic flow_rule match at our embedded storage. */
	flow->rule->match.dissector = &flow->match.dissector;
	flow->rule->match.mask = &flow->match.mask;
	flow->rule->match.key = &flow->match.key;

	return flow;
}

/*
 * Build the hardware representation of @rule.
 *
 * First pass counts the expressions flagged NFT_OFFLOAD_F_ACTION to
 * size the action array; second pass invokes each expression's
 * ->offload() callback to populate match and actions.  A rule with no
 * offloadable action, or containing any expression without an
 * ->offload() callback, is rejected with -EOPNOTSUPP.
 *
 * The translation context @ctx is transient: only the layer-3
 * protocol dependency recorded in it (ctx->dep.l3num) survives, copied
 * into flow->proto.
 *
 * Returns the new nft_flow_rule or an ERR_PTR().
 */
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (expr->ops && expr != nft_expr_last(rule)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);	/* kfree(NULL) is a no-op if ctx allocation failed */
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

/*
 * Release a nft_flow_rule built by nft_flow_rule_create().
 *
 * Redirect/mirred actions hold a device reference taken by the
 * expression's ->offload() callback; drop it here before freeing.
 */
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

/*
 * Record which protocol layer the next expression's match depends on.
 * Consumed (and reset) by nft_offload_update_dependency().
 */
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

/*
 * Store the concrete protocol value for the pending dependency set by
 * nft_offload_set_dependency(): an EtherType (__u16) for a network
 * layer dependency, an IP protocol number (__u8) for a transport
 * layer one.  Resets the dependency type back to UNSPEC afterwards.
 */
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

/* Fill the common part of a flow classifier offload request. */
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

/*
 * Invoke every flow block callback on @cb_list with the given setup
 * request.  Stops at the first callback returning a negative error and
 * propagates it; returns 0 when all callbacks succeed.
 */
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

/*
 * Validate that the base chain priority fits the offload request's
 * priority field: must be positive and at most USHRT_MAX.
 * Returns 0 if acceptable, -1 otherwise.
 */
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

/*
 * Prepare a flow_cls_offload request for @rule.  The rule pointer
 * itself serves as the driver-visible cookie.  @flow may be NULL
 * (e.g. for FLOW_CLS_DESTROY), in which case the protocol defaults to
 * ETH_P_ALL and no rule payload is attached.
 */
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

/*
 * Push a single rule add/delete to all drivers bound to the chain's
 * flow block.  Only base chains can be offloaded.
 */
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct netlink_ext_ack extack = {};
	struct flow_cls_offload cls_flow;
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
				 &basechain->flow_block.cb_list);
}

/*
 * Bind: adopt the driver callbacks collected in @bo into the base
 * chain's own flow block callback list.
 */
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

/*
 * Unbind: ask the departing callbacks to destroy every rule of the
 * chain they may still hold state for, then free the callbacks.
 */
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

/* Dispatch a flow block bind/unbind after the driver has filled @bo. */
static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Initialize a flow block offload request for this chain's block. */
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net = net;
	bo->block = &basechain->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

/*
 * Bind/unbind the chain's flow block on a device that implements
 * ndo_setup_tc (the direct path).
 */
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

/*
 * Run one indirect block callback for @chain on @dev and commit the
 * resulting bind/unbind.  Called from the indirect block notification
 * path, where @chain may legitimately be NULL (nothing to do).
 */
static void nft_indr_block_ing_cmd(struct net_device *dev,
				   struct nft_base_chain *chain,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_priv,
				   enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;

	if (!chain)
		return;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	nft_block_setup(chain, &bo, cmd);
}

/*
 * Bind/unbind via the indirect flow block path, for devices without
 * ndo_setup_tc.  If no driver registered a callback for @dev, the
 * callback list stays empty and offload is not supported.
 */
static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	flow_indr_block_call(dev, &bo, cmd);

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(chain, &bo, cmd);
}

/* NOTE(review): appears unused within this file — possibly kept for
 * out-of-file users; confirm before removing.
 */
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

/*
 * Choose the direct (ndo_setup_tc) or indirect flow block path for
 * @dev and issue the bind/unbind.
 */
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

/*
 * Apply @cmd to the chain on each hooked device, or only on @this_dev
 * when it is non-NULL.
 *
 * On a bind error: if a specific device was targeted, just report it;
 * if binding all hooks, roll back by unbinding the @i devices already
 * bound (walking the hook list again up to that count).
 */
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

/*
 * Offload (or tear down) a whole chain.  @ppolicy optionally overrides
 * the chain's stored policy for this operation.  Only base chains with
 * an accept default policy can be bound for now.
 */
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

/*
 * Undo the hardware updates already applied in this commit run.
 * Walks the commit list backwards starting from the failed transaction
 * @trans, applying the inverse operation of each hardware-offloaded
 * netdev-family transaction (new chain -> unbind, deleted chain ->
 * rebind, new rule -> destroy, deleted rule -> re-add).
 */
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		/* Undoing an already-applied operation should not fail. */
		if (WARN_ON_ONCE(err))
			break;
	}
}

/*
 * Push a validated nf_tables transaction batch to hardware.
 *
 * First pass: apply each netdev-family, hardware-offload transaction
 * to the drivers; on the first failure, roll back the already-applied
 * ones via nft_flow_rule_offload_abort() and stop.  Rule insertions
 * are only accepted as plain appends (no NLM_F_REPLACE, and
 * NLM_F_APPEND required).
 *
 * Second pass: release the software-side nft_flow_rule attached to
 * each rule transaction — it is only needed for the duration of the
 * commit.  This pass runs over the full list even after an abort.
 */
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}

/*
 * Find the hardware-offloaded netdev-family base chain hooked on @dev,
 * or NULL if there is none.  Scans every netdev table's chains and
 * each base chain's hook list.  Caller must hold net->nft.commit_mutex
 * (both callers below take it).
 */
static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

/*
 * Indirect flow block entry point: a driver (de)registered an indirect
 * callback for @dev; replay the bind/unbind against the chain hooked
 * on that device, if any.
 */
static void nft_indr_block_cb(struct net_device *dev,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command cmd)
{
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
		struct nft_base_chain *basechain;

		basechain = nft_base_chain(chain);
		nft_indr_block_ing_cmd(dev, basechain, cb, cb_priv, cmd);
	}
	mutex_unlock(&net->nft.commit_mutex);
}

/*
 * Netdevice notifier: on NETDEV_UNREGISTER, unbind the flow block of
 * any offloaded chain hooked on the departing device.
 */
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct flow_indr_block_entry block_ing_entry = {
	.cb = nft_indr_block_cb,
	.list = LIST_HEAD_INIT(block_ing_entry.list),
};

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call = nft_offload_netdev_event,
};

/*
 * Register the netdevice notifier and the indirect block callback.
 * Returns 0 on success or the notifier registration error.
 */
int nft_offload_init(void)
{
	int err;

	err = register_netdevice_notifier(&nft_offload_netdev_notifier);
	if (err < 0)
		return err;

	flow_indr_add_block_cb(&block_ing_entry);

	return 0;
}

/* Tear down in reverse order of nft_offload_init(). */
void nft_offload_exit(void)
{
	flow_indr_del_block_cb(&block_ing_entry);
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}