/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held, struct tcf_proto *tp,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if it is present and writable, NULL
 * otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
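
/*
 * The per-protocol helpers below all follow the same pattern: locate the
 * transport header via tcf_csum_skb_nextlayer(), zero the checksum field,
 * recompute the sum over the payload (adding the pseudo-header where the
 * protocol requires one), and set skb->ip_summed to CHECKSUM_NONE so the
 * stack does not rely on stale checksum-offload state.
 */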

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms.  Don't use
	 * udph->len to get the payload length without a protocol check:
	 * UDPLITE uses that field as the checksum coverage instead.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
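
/*
 * Note on the zero-checksum special case above and below: a computed UDP
 * checksum of zero is transmitted as all-ones (CSUM_MANGLED_0), since a
 * zero checksum field means "no checksum" over IPv4 and is not permitted
 * over IPv6 at all.
 */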

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms.  Don't use
	 * udph->len to get the payload length without a protocol check:
	 * UDPLITE uses that field as the checksum coverage instead.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
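
/*
 * IPv6 jumbograms carry a zero payload_len in the fixed header and put the
 * real length in a Hop-by-Hop Jumbo Payload option; scan the option TLVs so
 * that *pl can be corrected before the transport checksum is recomputed.
 * Returns 0 on a malformed jumbo option, 1 otherwise.
 */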

static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
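
/*
 * Datapath entry point.  Runs inside the softirq RCU read section, so the
 * parameters are fetched with rcu_dereference_bh().  Any VLAN headers in
 * front of the IP header are pulled to reach it and pushed back before
 * returning, leaving the skb as it was found.
 */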

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = tc_skb_protocol(skb);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD): /* fall through */
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	action = TC_ACT_SHOT;
	goto out;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index = p->tcf_index,
		.refcnt = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
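
/*
 * Remaining control-path boilerplate: per-netns lookup of an action
 * instance by index, and the netlink message size estimate used when
 * building action notifications.
 */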

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.id = TCA_ID_CSUM,
	.owner = THIS_MODULE,
	.act = tcf_csum_act,
	.dump = tcf_csum_dump,
	.init = tcf_csum_init,
	.cleanup = tcf_csum_cleanup,
	.walk = tcf_csum_walker,
	.lookup = tcf_csum_search,
	.get_fill_size = tcf_csum_get_fill_size,
	.size = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
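
/*
 * Example usage (iproute2 syntax; the device and the matchall filter are
 * illustrative, not part of this module):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 egress protocol ip matchall \
 *		action csum iph and udp
 *
 * This recomputes the IPv4 header and UDP checksums of matching packets,
 * which is typically needed after an action such as pedit has rewritten
 * header fields without fixing the checksums up.
 */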