/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed into hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};
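
/* Orientation note (added for readability, derived from the TC_U32_* macros
 * in <net/pkt_cls.h>): a u32 handle packs three fields into 32 bits. The
 * upper 12 bits identify the hash table (htid), the next 8 bits select the
 * bucket within that table, and the low 12 bits identify the key node in
 * that bucket. For example, handle 0x80012345 refers to node 0x345 in
 * bucket 0x12 of hash table 0x800; iproute2's tc typically displays this as
 * 800:12:345.
 */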

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	struct rcu_head		rcu;
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* Block sharing is currently supported only for classless qdiscs;
	 * in that case the block pointer identifies the tc_u_common. If the
	 * block is not shared, block->q is a valid pointer and is used
	 * instead, which covers classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->ptr == tc_u_common_ptr(tp))
			return tc;
	}
	return NULL;
}
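
/* Illustrative sketch (added for readability, not part of the original
 * file) of how these objects are typically created from userspace with
 * iproute2; the exact command syntax is an assumption and may differ
 * between tc versions:
 *
 *   # create a child hash table (a tc_u_hnode) with 256 buckets
 *   tc filter add dev eth0 parent 1: prio 10 handle 1: protocol ip u32 \
 *	divisor 256
 *
 *   # add a key node (a tc_u_knode) to the root table that hashes on the
 *   # last byte of the IP destination address and links into table 1:
 *   tc filter add dev eth0 parent 1: prio 10 protocol ip u32 \
 *	ht 800:: match ip dst 10.0.0.0/8 \
 *	hashkey mask 0x000000ff at 16 link 1:
 */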

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = tc_u_common_ptr(tp);
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_work() should be used when freeing a copied version of a
 * tc_u_knode obtained from u32_init_knode(). When copies are obtained from
 * u32_init_knode() the statistics are shared between the old and new copies
 * to allow readers to continue to update the statistics during the copy. To
 * support this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the variant that frees the entire
 * structure including the statistics percpu variables. Only use this if the
 * key is not a copy returned by u32_init_knode(). See u32_delete_key_work()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
	tcf_block_offload_dec(block, &n->flags);
}
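
/* Hardware offload note (added for readability; summarises the callback
 * convention this file relies on, which may differ in other kernel
 * versions): tc_setup_cb_call() returns a negative errno on failure or the
 * number of callbacks that accepted the filter. When the user requested
 * TCA_CLS_FLAGS_SKIP_SW, at least one device must accept the node or the
 * operation is rejected with -EINVAL, as seen below.
 */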

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	} else if (err > 0) {
		n->in_hw_count = err;
		tcf_block_offload_inc(block, &n->flags);
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}

static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}
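
/* Note added for readability: ->delete() removes the single knode or hnode
 * identified by 'arg'. On return, *last tells the caller whether this
 * classifier instance is now effectively empty (no remaining hash table
 * entries and no other references), in which case the whole tcf_proto can
 * be torn down.
 */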

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 1) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 1) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}
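
/* Note added for readability: filters are never modified in place. When an
 * existing knode is changed, u32_change() builds a copy with
 * u32_init_knode(), splices it into the bucket with u32_replace_knode()
 * below, and frees the old node only after an RCU grace period
 * (u32_delete_key_work), so u32_classify() can keep walking the old chain
 * without locks.
 */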

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr, extack);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
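	/* Note added for readability: fshift turns the hash mask into a
	 * shift so that u32_hash_fold() yields a small bucket index. For
	 * example, an hmask of htonl(0x0000ff00) gives ffs() == 9, so
	 * fshift == 8 and the fold extracts exactly that byte.
	 */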
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err) {
		if (add && tc_skip_sw(n->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);

	return 0;
}

static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");