// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

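/*
 * Illustrative configuration sketch (an explanatory addition, not part of
 * the original file): this action is normally attached from user space with
 * iproute2's tc. The device name, qdisc and classifier below are assumptions
 * chosen only to show the shape of a typical invocation; exact keyword
 * spellings for the update targets should be checked against tc-csum(8):
 *
 *   # tc qdisc add dev eth0 clsact
 *   # tc filter add dev eth0 egress protocol ip matchall action csum udp
 *
 * Each target keyword after "csum" (udp, tcp, sctp, the IPv4 header, ...)
 * selects one TCA_CSUM_UPDATE_FLAG_* bit in tc_csum.update_flags, which
 * tcf_csum_init() below copies into the action's parameters.
 */
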
static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
                         int bind, bool rtnl_held, struct tcf_proto *tp,
                         struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_new;
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_csum_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (err > 0) {
                if (bind) /* don't override defaults */
                        return 0;
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {
                return err;
        }

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        p = to_tcf_csum(*a);

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                err = -ENOMEM;
                goto put_chain;
        }
        params_new->update_flags = parm->update_flags;

        spin_lock_bh(&p->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(p->params, params_new,
                           lockdep_is_held(&p->tcf_lock));
        spin_unlock_bh(&p->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (params_new)
                kfree_rcu(params_new, rcu);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next-layer header is available in the specified
 * sk_buff. Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            skb_try_make_writable(skb, hl + ntkoff))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

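/*
 * Note on the pattern shared by the helpers above and below (an explanatory
 * addition, not from the original file): the Internet checksum (RFC 1071) is
 * the 16-bit one's complement of the one's complement sum of the data. Each
 * helper therefore zeroes the checksum field, lets csum_partial() accumulate
 * the 32-bit partial sum, and folds and complements it with csum_fold() (or
 * with csum_ipv6_magic()/csum_tcpudp_magic(), which also mix in the
 * pseudo-header). As a tiny worked example, summing the two 16-bit words
 * 0x4500 and 0x003c gives 0x453c, and the transmitted checksum is its
 * complement, 0xbac3.
 */
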
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDP-Lite checksum algorithms. Don't use
         * udph->len to get the real length without a protocol check:
         * UDP-Lite reuses udph->len as the checksum coverage. Use
         * iph->tot_len, or just ipl, instead.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {

                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDP-Lite checksum algorithms. Don't use
         * udph->len to get the real length without a protocol check:
         * UDP-Lite reuses udph->len as the checksum coverage. Use
         * ip6h->payload_len + sizeof(*ip6h) ... , or just ipl, instead.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);

                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);

                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

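/*
 * Background for the coverage checks above (an explanatory note, not part of
 * the original file): in UDP-Lite (RFC 3828) the field that plain UDP uses
 * for the datagram length instead carries the checksum coverage, i.e. how
 * many leading octets the checksum protects. A coverage of 0 means the whole
 * datagram is covered; any non-zero coverage must be at least the 8-byte
 * header and no larger than the datagram. Packets whose coverage falls
 * outside that range are left untouched via ignore_obscure_skb.
 */
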
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
                         unsigned int ipl)
{
        struct sctphdr *sctph;

        if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
                return 1;

        sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
        if (!sctph)
                return 0;

        sctph->checksum = sctp_compute_cksum(skb,
                                             skb_network_offset(skb) + ihl);
        skb->ip_summed = CHECKSUM_NONE;
        skb->csum_not_inet = 0;

        return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        /* Non-first fragments carry no transport header, so treat them as
         * protocol 0 and only consider the IPv4 header checksum below.
         */
        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        case IPPROTO_SCTP:
                if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
                        goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}

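/*
 * The next two functions walk IPv6 extension headers before the transport
 * checksum can be fixed up (an explanatory note, not from the original
 * file). Hop-by-hop options are scanned for the Jumbo Payload option, whose
 * 32-bit value replaces the 16-bit payload_len (which is 0 for jumbograms);
 * fragment headers stop the walk, since the full transport payload is not
 * available for checksumming.
 */
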
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
                                 unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                case IPPROTO_SCTP:
                        if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                            !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
                                goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
                        struct tcf_result *res)
{
        struct tcf_csum *p = to_tcf_csum(a);
        bool orig_vlan_tag_present = false;
        unsigned int vlan_hdr_count = 0;
        struct tcf_csum_params *params;
        u32 update_flags;
        __be16 protocol;
        int action;

        params = rcu_dereference_bh(p->params);

        tcf_lastuse_update(&p->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

        action = READ_ONCE(p->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        update_flags = params->update_flags;
        protocol = tc_skb_protocol(skb);
again:
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_8021AD): /* fall through */
        case cpu_to_be16(ETH_P_8021Q):
                if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
                        protocol = skb->protocol;
                        orig_vlan_tag_present = true;
                } else {
                        struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

                        protocol = vlan->h_vlan_encapsulated_proto;
                        skb_pull(skb, VLAN_HLEN);
                        skb_reset_network_header(skb);
                        vlan_hdr_count++;
                }
                goto again;
        }

out:
        /* Restore the skb for the pulled VLAN tags */
        while (vlan_hdr_count--) {
                skb_push(skb, VLAN_HLEN);
                skb_reset_network_header(skb);
        }

        return action;

drop:
        qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
        action = TC_ACT_SHOT;
        goto out;
}

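/*
 * The dump callback below serializes the action's parameters back to user
 * space. As an illustrative way to inspect installed instances and their
 * counters once the action is in use (assumed iproute2 syntax, not part of
 * the original file):
 *
 *   # tc -s actions ls action csum
 */
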
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                         int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = to_tcf_csum(a);
        struct tcf_csum_params *params;
        struct tc_csum opt = {
                .index   = p->tcf_index,
                .refcnt  = refcount_read(&p->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&p->tcf_lock);
        params = rcu_dereference_protected(p->params,
                                           lockdep_is_held(&p->tcf_lock));
        opt.action = p->tcf_action;
        opt.update_flags = params->update_flags;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&p->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&p->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
        struct tcf_csum *p = to_tcf_csum(a);
        struct tcf_csum_params *params;

        params = rcu_dereference_protected(p->params, 1);
        if (params)
                kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .id             = TCA_ID_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum_act,
        .dump           = tcf_csum_dump,
        .init           = tcf_csum_init,
        .cleanup        = tcf_csum_cleanup,
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .get_fill_size  = tcf_csum_get_fill_size,
        .size           = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
        .init = csum_init_net,
        .exit_batch = csum_exit_net,
        .id   = &csum_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
        return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);