/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}
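
/*
 * Locking note: tcf_csum_init() publishes a new tcf_csum_params object
 * under tcf_lock via rcu_swap_protected() and disposes of the previous
 * one with kfree_rcu().  This is what lets the datapath, tcf_csum_act()
 * below, read p->params with rcu_dereference_bh() and take no lock on
 * the per-packet path.
 */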

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
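
/*
 * All of the helpers above follow the same recipe: zero the checksum
 * field, sum the transport header and payload with csum_partial(), then
 * finalize.  ICMP and IGMP checksums cover only the message itself, so
 * csum_fold() suffices; TCP and ICMPv6 checksums also cover a
 * pseudo-header of addresses, length and protocol, hence tcp_v4_check()
 * and csum_ipv6_magic().  GSO TCP packets are left untouched because
 * their checksums are filled in when the skb is segmented.
 */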

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms.  Don't use
	 * udph->len to get the real payload length without checking the
	 * protocol first: UDPLITE uses udph->len for the checksum
	 * coverage instead.  Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms.  Don't use
	 * udph->len to get the real payload length without checking the
	 * protocol first: UDPLITE uses udph->len for the checksum
	 * coverage instead.  Use ip6h->payload_len + sizeof(*ip6h) ...,
	 * or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}
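
/*
 * Note the CSUM_MANGLED_0 fixups above: a computed checksum of zero is
 * stored as 0xffff because a literal zero in the UDP checksum field
 * means "no checksum was generated" on IPv4, and is simply illegal for
 * IPv6 and UDPLITE.  For the same reason, plain IPv4 UDP packets that
 * already carry a zero checksum are left untouched (the udph->check
 * test in tcf_csum_ipv4_udp()).
 */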

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
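
/*
 * tcf_csum_ipv6_hopopts() exists because of jumbograms: when the fixed
 * IPv6 header carries payload_len == 0, the real payload length lives
 * in a hop-by-hop Jumbo Payload TLV.  Per RFC 2675 that option carries
 * 4 bytes of data (6 bytes including the type and length octets) and
 * must start at offset 4n + 2 within the extension header, which is
 * exactly what the optlen/alignment check above enforces.
 */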

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	u32 update_flags;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	return TC_ACT_SHOT;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static int tcf_csum_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_delete_index(tn, index);
}
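
/*
 * tcf_csum_dump() takes tcf_lock so that tcf_action and update_flags
 * are reported as a consistent pair even if tcf_csum_init() is
 * replacing the parameters concurrently; the per-packet path in
 * tcf_csum_act() never takes this lock and relies on RCU instead.
 */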

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.delete		= tcf_csum_delete,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
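
/*
 * Example usage from userspace (an illustrative sketch, assuming
 * iproute2's "csum" action with its usual flag syntax; the device name
 * is arbitrary):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress protocol ip matchall \
 *           action csum ip4h and udp
 *
 * This would recompute the IPv4 header and UDP checksums of matching
 * packets, i.e. update_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR |
 * TCA_CSUM_UPDATE_FLAG_UDP.
 */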