/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
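
/*
 * Classification walks the structures above in a fixed order: the fastmap
 * is tried first as a small cache of recent (id, iif) -> filter results,
 * including negative results (cached as ROUTE4_FAILURE).  On a miss, the
 * bucket selected by the route realm ("TO") is scanned: exact FROM matches
 * first, then incoming-interface matches, then the wildcard chain.  If
 * nothing matches, the scan restarts against the wildcard TO bucket
 * (table index 256).
 */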
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					call_rcu(&f->rcu, route4_delete_filter);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}
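
/*
 * to_hash() and from_hash() map the two 16-bit halves of a filter handle
 * to a head-table slot and an in-bucket chain: to_hash() folds the TO
 * realm into 0..255, or 256 and above when the wildcard bit (0x8000) is
 * set (only index 256 is ever accepted for head->table); from_hash()
 * selects one of the 16 FROM chains, one of the 16 IIF chains (0x8000
 * set), the wildcard chain 32 (0xFFFF), or the out-of-range marker 256.
 * Deletion below runs under RTNL: the filter is unlinked with RCU
 * primitives, freed via call_rcu(), and the bucket itself is released
 * once all 33 chains are empty.
 */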
static int route4_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = (struct route4_filter *)arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Flush any fastmap entries that may still reference
			 * this filter; since we unlinked it above, it cannot
			 * be re-inserted into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
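
/*
 * Filter handles encode the match in two 16-bit halves.  Low half: the TO
 * realm in bits 0-7, with bit 15 (0x8000) set when no TO was given.  High
 * half: the FROM realm, or the interface index with bit 15 set for IIF
 * matches, or 0xFFFF for "from anywhere".  route4_set_parms() rebuilds
 * this encoding as nhandle and rejects a caller-supplied handle that
 * disagrees with the supplied attributes.
 */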
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				goto errout;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = (struct route4_filter *)*arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	/* When replacing, unlink the old filter (not the new one we just
	 * inserted) from its chain; the old filter may live in a different
	 * bucket if the handle changed.
	 */
	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = (unsigned long)f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
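
/*
 * walk() and dump() below run under RTNL.  The walker visits every chain
 * of every bucket, honouring arg->skip and arg->count so a dump can be
 * resumed; dump() re-derives the TO/FROM/IIF attributes from the handle
 * encoding described above rather than storing them separately.
 */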
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
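
/*
 * Illustrative userspace usage sketch (exact option spellings depend on the
 * installed iproute2; the device, handles, and realm numbers here are made
 * up for the example):
 *
 *	# tag routes with realms, then classify on them
 *	ip route add 10.0.0.0/24 dev eth0 realm 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route to 2 classid 1:10
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 2 classid 1:20
 *
 * dst->tclassid, filled in from the route's realms, is what
 * route4_classify() matches against.
 */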