/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
	.action	= TCA_FLOW_ACT,
	.police	= TCA_FLOW_POLICE,
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
			return ntohl(ip_hdr(skb)->saddr);
		break;
	case htons(ETH_P_IPV6):
		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
			return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
		break;
	}

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
			return ntohl(ip_hdr(skb)->daddr);
		break;
	case htons(ETH_P_IPV6):
		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
			return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
		break;
	}

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
		       ip_hdr(skb)->protocol : 0;
	case htons(ETH_P_IPV6):
		return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
		       ipv6_hdr(skb)->nexthdr : 0;
	default:
		return 0;
	}
}
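/*
 * Transport ports are read straight from the packet: the headers are
 * pulled if necessary and proto_ports_offset() locates the port field
 * for the given protocol. IPv4 fragments other than the first carry
 * no transport header, so they fall back to the socket/dst fold.
 */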
static u32 flow_get_proto_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
			iph = ip_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
						 poff));
		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
			iph = ipv6_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
						 poff));
		}
		break;
	}
	}

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
						 2 + poff));
		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
			iph = ipv6_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
						 poff + 2));
		}
		break;
	}
	}

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}
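/*
 * CTTUPLE() reads one member of the conntrack tuple for the packet's
 * direction. When no conntrack entry is attached (or conntrack is not
 * compiled in), it jumps to the caller's "fallback" label so the
 * plain, non-conntrack key is used instead.
 */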
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);			\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb);
}

static u32 flow_get_nfct_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb);
}

static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb);
}

static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ROUTE
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_rxhash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb);
	case FLOW_KEY_DST:
		return flow_get_dst(skb);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
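/*
 * Classification: for each filter whose ematches accept the packet,
 * gather the configured keys, then either jhash them (FLOW_MODE_HASH)
 * or apply mask/xor/rshift/addend to the single key (FLOW_MODE_MAP).
 * The result, reduced modulo the divisor if one is set, is added to
 * the baseclass to form the class ID.
 */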
static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[f->nkeys];

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
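/*
 * Create or update a filter. A new filter requires an explicit handle
 * and a key set; multiple keys and a perturbation period are only
 * valid in hash mode. An unspecified baseclass defaults to minor 1
 * under the attached qdisc's major number.
 */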
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}
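/*
 * Dump the filter configuration back to userspace. Attributes still at
 * their default value are omitted; the perturbation period is converted
 * from jiffies back to seconds.
 */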
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	if (f->perturb_period)
		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");