// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer if tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen) {
			if (skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
				return false;
		} else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth)) {
			return false;
		}

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
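/*
 * Illustrative example (not part of the original file): a rule such as
 *
 *	nft add rule ip filter input ip saddr 192.0.2.1 drop
 *
 * is compiled by userspace into a payload expression with
 * base = NFT_PAYLOAD_NETWORK_HEADER, offset = offsetof(struct iphdr, saddr)
 * and len = 4; nft_payload_eval() above copies those bytes into dreg so
 * that a following cmp expression can match on them.
 */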
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
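/*
 * Illustrative mapping (not part of the original file): a rule such as
 *
 *	nft add rule netdev filter ingress ether saddr 00:11:22:33:44:55 drop
 *
 * reaches the h_source case above when offloaded; NFT_OFFLOAD_MATCH()
 * records FLOW_DISSECTOR_KEY_ETH_ADDRS plus the src field offset and a
 * full-length mask in the offload register, and the cmp expression's
 * operand later supplies the value matched by the hardware flow rule.
 */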
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
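/*
 * Note (illustrative): TCP and UDP source/destination ports above share
 * FLOW_DISSECTOR_KEY_PORTS, which does not itself encode the transport
 * protocol; the protocol is pinned separately through the basic key's
 * ip_proto match (see NFT_OFFLOAD_DEP_TRANSPORT in the network-header
 * handlers), which is what lets nft_payload_offload_th() below dispatch
 * on ctx->dep.protonum.
 */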
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}
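/*
 * Background note (illustrative): nft_csum_replace() above implements the
 * incremental update of RFC 1624, HC' = ~(~HC + ~m + m'). fsum is the
 * 32-bit partial sum over the bytes being replaced (m) and tsum the sum
 * over the replacement bytes (m'), so csum_sub(~csum_unfold(*sum), fsum)
 * forms ~HC + ~m in one's complement arithmetic and csum_add(..., tsum)
 * folds in m'. The CSUM_MANGLED_0 fixup keeps a result that folds to
 * zero distinguishable from the "no checksum" encoding on the wire.
 */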
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * does not require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
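/*
 * Illustrative example (not part of the original file): a mangle rule
 * such as
 *
 *	nft add rule ip nat prerouting tcp dport 80 tcp dport set 8080
 *
 * is sent by userspace as a payload set on the transport header with
 * csum_type = NFT_PAYLOAD_CSUM_INET and csum_offset =
 * offsetof(struct tcphdr, check), so nft_payload_set_eval() above fixes
 * the TCP checksum incrementally after storing the new port.
 */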
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};
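/*
 * Note: there is deliberately no module_init() here; payload is one of
 * the built-in expression types that the nf_tables core registers at
 * boot (see nft_basic_types[] in net/netfilter/nf_tables_core.c, where
 * nft_payload_type is assumed to be listed).
 */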