// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held, struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return the next layer pointer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check:
	 * UDPLITE uses udph->len for another purpose.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check:
	 * UDPLITE uses udph->len for another purpose.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		/* The IP header is nested below a VLAN tag: either switch to
		 * the protocol carried in the skb metadata tag (first pass
		 * only), or pull the in-band tag and retry with the
		 * encapsulated protocol.
		 */
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init		= csum_init_net,
	.exit_batch	= csum_exit_net,
	.id		= &csum_net_id,
	.size		= sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
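
/*
 * Illustrative usage sketch: this action is normally attached to a
 * classifier from userspace, where each keyword selects one of the
 * TCA_CSUM_UPDATE_FLAG_* bits handled by tcf_csum_init()/tcf_csum_act()
 * above. With iproute2, a typical invocation might look like:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 egress protocol ip matchall \
 *		action csum ip4h and tcp and udp
 *
 * The exact keyword spelling (ip4h/iph, icmp, igmp, tcp, udp, udplite,
 * sctp) depends on the iproute2 version in use.
 */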