// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add the vlan header into the user buffer if the tag was stripped by rx offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
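/*
 * Worked example for the split copy above: with a hw-stripped single tag
 * (vlan_hlen == 0), a load at offset 12, len 8 crosses the reconstructed
 * vlan_ethhdr boundary.  ethlen is trimmed by 12 + 8 - VLAN_ETH_HLEN = 2,
 * so six bytes (tpid, TCI, encapsulated proto) are served from &veth and
 * the remaining two bytes come from the on-wire data at mac_off + ETH_HLEN,
 * which is where the network header starts once the stripped tag is
 * accounted for.
 */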
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
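/*
 * For reference: userspace nft typically compiles a rule like "tcp dport 22"
 * into a payload load with base NFT_PAYLOAD_TRANSPORT_HEADER, offset
 * offsetof(struct tcphdr, dest) and len 2, followed by a cmp expression.
 * The zeroing of the trailing register word above is what keeps such
 * partial (non 4-byte multiple) loads comparable as full 32-bit registers.
 */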
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!track->regs[priv->dreg].selector ||
	    track->regs[priv->dreg].selector->ops != expr->ops) {
		track->regs[priv->dreg].selector = expr;
		track->regs[priv->dreg].bitwise = NULL;
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		track->regs[priv->dreg].selector = expr;
		track->regs[priv->dreg].bitwise = NULL;
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}

static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}
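/*
 * Worked example for nft_payload_offload_mask(): matching only the first
 * two bytes of an IPv4 address (priv_len = 2, field_len = 4) gives
 * remainder = 2, k = 0 and delta = 2, so remainder_mask =
 * htonl(~0xffff) = htonl(0xffff0000) and the resulting mask bytes are
 * ff ff 00 00, i.e. only the leading two bytes take part in the match.
 */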
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
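/*
 * Offload only covers loads that line up exactly with a well-known header
 * field: e.g. "ip saddr 10.0.0.1" (offset offsetof(struct iphdr, saddr),
 * len 4) maps to FLOW_DISSECTOR_KEY_IPV4_ADDRS above, while a load at any
 * other offset hits the default -EOPNOTSUPP case and the rule stays in
 * software.
 */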
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
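/*
 * nft_csum_replace() is the incremental update of RFC 1624 (eqn. 3):
 * HC' = ~(~HC + ~m + m'), with fsum and tsum standing in for the sums of
 * the old and new data.  The CSUM_MANGLED_0 fixup matters for UDP, where
 * an all-zero checksum field means "no checksum": a checksum that
 * computes to zero has to be transmitted as 0xffff instead.
 */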
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
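/*
 * Note on the ip_summed handling above, mirroring the
 * inet_proto_csum_replace*() helpers: with CHECKSUM_PARTIAL the field
 * holds the pseudo-header sum in non-inverted form for the device to
 * complete, so it is updated without the final complement; with
 * CHECKSUM_COMPLETE, skb->csum caches the hardware-computed sum over the
 * packet and must be patched in lockstep with the payload rewrite.
 */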
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (pkt->fragoff == 0 &&
		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		track->regs[i].selector = NULL;
		track->regs[i].bitwise = NULL;
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};
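/*
 * Example: userspace nft typically compiles "ip ttl set 64" into this
 * expression with base NFT_PAYLOAD_NETWORK_HEADER, offset
 * offsetof(struct iphdr, ttl), len 1, csum_type NFT_PAYLOAD_CSUM_INET and
 * csum_offset offsetof(struct iphdr, check), so the IPv4 header checksum
 * is fixed up incrementally as part of the store.
 */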
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};
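/*
 * Small aligned loads (len <= 4, power of two, aligned offset) from the
 * network or transport header select nft_payload_fast_ops.  The ops
 * pointer itself is the marker that lets the nf_tables core try
 * nft_payload_fast_eval() on linear skb data first, falling back to
 * nft_payload_eval() above, which is why nft_payload_fast_ops carries
 * the same callbacks as nft_payload_ops.
 */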