// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/em_meta.c	Metadata ematch
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 *	The metadata ematch compares two meta objects where each object
 *	represents either a meta value stored in the kernel or a static
 *	value provided by userspace. Userspace does not provide the objects
 *	themselves but only a definition carrying the information needed to
 *	build them. Every object is of a certain type, which must equal the
 *	type of the object it is being compared to.
 *
 *	The definition of an object consists of the type (meta type), an
 *	identifier (meta id) and additional type specific information.
 *	The meta id is either TCF_META_ID_VALUE for values provided by
 *	userspace or an index into the meta operations table consisting of
 *	function pointers to type specific meta data collectors returning
 *	the requested meta value.
 *
 *	             lvalue                             rvalue
 *	          +-----------+                      +-----------+
 *	          | type: INT |                      | type: INT |
 *	 def      | id: DEV   |                      | id: VALUE |
 *	          | data:     |                      | data: 3   |
 *	          +-----------+                      +-----------+
 *	                |                                  |
 *	                ---> meta_ops[INT][DEV](...)       |
 *	                              |                    |
 *	                              -----------          |
 *	                                         V         V
 *	          +-----------+                      +-----------+
 *	          | type: INT |                      | type: INT |
 *	 obj      | id: DEV   |                      | id: VALUE |
 *	          | data: 2   |<--data got filled out| data: 3   |
 *	          +-----------+                      +-----------+
 *	                |                                  |
 *	                ------------> 2  equals  3 <--------
 *
 *	This is a simplified schema; the complexity varies depending
 *	on the meta type. Obviously, the length of the data must also
 *	be provided for non-numeric types.
 *
 *	Additionally, type dependent modifiers such as shift operators
 *	or masks may be applied to extend the functionality. As of now,
 *	the variable length type supports shifting the byte string to
 *	the right, eating up any number of octets and thus supporting
 *	wildcard interface name comparisons such as "ppp%" matching
 *	ppp0..9.
 *
 * NOTE: Certain meta values depend on other subsystems and are
 *	 only available if that subsystem is enabled in the kernel.
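 *
 *	Illustrative userspace usage (a sketch only; the meta ematch is
 *	driven through the tc(8) "basic" classifier and the exact object
 *	names and syntax depend on the iproute2 version in use):
 *
 *	  tc filter add dev eth0 parent 1: basic \
 *	          match 'meta(priority eq 3)' classid 1:3
 *
 *	Here "priority" selects the int_priority collector below as the
 *	lvalue, while the constant 3 becomes a TCF_META_ID_VALUE rvalue
 *	of the same type and the two are compared with the EQ operand.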
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
	unsigned long		value;
	unsigned int		len;
};

struct meta_value {
	struct tcf_meta_val	hdr;
	unsigned long		val;
	unsigned int		len;
};

struct meta_match {
	struct meta_value	lvalue;
	struct meta_value	rvalue;
};

static inline int meta_id(struct meta_value *v)
{
	return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
	return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
	struct tcf_pkt_info *info, struct meta_value *v, \
	struct meta_obj *dst, int *err)

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
	get_random_bytes(&dst->value, sizeof(dst->value));
}

static inline unsigned long fixed_loadavg(int load)
{
	int rnd_load = load + (FIXED_1/200);
	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
	dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
	dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
	dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = dev->ifindex;
	return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = (unsigned long) dev->name;
	dst->len = strlen(dev->name);
	return 0;
}

META_COLLECTOR(int_dev)
{
	*err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
	*err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

META_COLLECTOR(int_vlan_tag)
{
	unsigned short tag;

	if (skb_vlan_tag_present(skb))
		dst->value = skb_vlan_tag_get(skb);
	else if (!__vlan_get_tag(skb, &tag))
		dst->value = tag;
	else
		*err = -1;
}

/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
	dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
	/* Let userspace take care of the byte ordering */
	dst->value = tc_skb_protocol(skb);
}

META_COLLECTOR(int_pkttype)
{
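	/* skb->pkt_type classifies the frame (PACKET_HOST, PACKET_BROADCAST,
	 * PACKET_MULTICAST, PACKET_OTHERHOST, ...), see <linux/if_packet.h>.
	 */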
	dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
	dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
	dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
	dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
	dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
	dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
	dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
	if (unlikely(skb_dst(skb) == NULL))
		*err = -1;
	else
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->value = skb_dst(skb)->tclassid;
#else
		dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
	if (unlikely(skb_rtable(skb) == NULL))
		*err = -1;
	else
		dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

#define skip_nonlocal(skb) \
	(unlikely(skb->sk == NULL))

META_COLLECTOR(int_sk_family)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	/* No error if bound_dev_if is 0, legal userspace check */
	dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}

	if (skb->sk->sk_bound_dev_if == 0) {
		dst->value = (unsigned long) "any";
		dst->len = 3;
	} else {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(skb->sk),
					   skb->sk->sk_bound_dev_if);
		*err = var_dev(dev, dst);
		rcu_read_unlock();
	}
}

META_COLLECTOR(int_sk_refcnt)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = refcount_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
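	/* sk_type and friends live on a full socket only; skb_to_full_sk()
	 * maps a request minisock back to its listener and yields NULL when
	 * the skb carries no socket at all.
	 */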
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_wmem_queued);
}

META_COLLECTOR(int_sk_fwd_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_forward_alloc;
}

META_COLLECTOR(int_sk_sndbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_ack_backlog);
}

META_COLLECTOR(int_sk_max_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_max_ack_backlog);
}

META_COLLECTOR(int_sk_prio)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_rcvlowat);
}

META_COLLECTOR(int_sk_rcvtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
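	/* sk_sndtimeo is kept in jiffies; report it in seconds. */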
	dst->value = sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
			       struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		[META_ID(DEV)] = META_FUNC(var_dev),
		[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
	},
	[TCF_META_TYPE_INT] = {
		[META_ID(RANDOM)] = META_FUNC(int_random),
		[META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0),
		[META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
		[META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
		[META_ID(DEV)] = META_FUNC(int_dev),
		[META_ID(PRIORITY)] = META_FUNC(int_priority),
		[META_ID(PROTOCOL)] = META_FUNC(int_protocol),
		[META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
		[META_ID(PKTLEN)] = META_FUNC(int_pktlen),
		[META_ID(DATALEN)] = META_FUNC(int_datalen),
		[META_ID(MACLEN)] = META_FUNC(int_maclen),
		[META_ID(NFMARK)] = META_FUNC(int_mark),
		[META_ID(TCINDEX)] = META_FUNC(int_tcindex),
		[META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
		[META_ID(RTIIF)] = META_FUNC(int_rtiif),
		[META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
		[META_ID(SK_STATE)] = META_FUNC(int_sk_state),
		[META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse),
		[META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if),
		[META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt),
		[META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf),
		[META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf),
		[META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown),
		[META_ID(SK_PROTO)] = META_FUNC(int_sk_proto),
		[META_ID(SK_TYPE)] = META_FUNC(int_sk_type),
		[META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc),
		[META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc),
		[META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc),
		[META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued),
		[META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen),
		[META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen),
		[META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
		[META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
		[META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
		[META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
		[META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
		[META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
		[META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
		[META_ID(SK_PRIO)] = META_FUNC(int_sk_prio),
		[META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat),
		[META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo),
		[META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
		[META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
		[META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
		[META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
		[META_ID(RXHASH)] = META_FUNC(int_rxhash),
	}
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
	return &__meta_ops[meta_type(val)][meta_id(val)];
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
	int r = a->len - b->len;

	if (r == 0)
		r = memcmp((void *) a->value, (void *) b->value, a->len);

	return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
	int len = nla_len(nla);

	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
	if (dst->val == 0UL)
		return -ENOMEM;
	dst->len = len;
	return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
	kfree((void *) v->val);
}

static void meta_var_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	int shift = v->hdr.shift;

	if (shift && shift < dst->len)
		dst->len -= shift;
}

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->val && v->len &&
	    nla_put(skb, tlv, v->len, (void *) v->val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
	/* Let gcc optimize it; the unlikely() is not backed by any
	 * numbers, but jump-free code for mismatches seems more
	 * logical.
	 */
	if (unlikely(a->value == b->value))
		return 0;
	else if (a->value < b->value)
		return -1;
	else
		return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(unsigned long)) {
		dst->val = *(unsigned long *) nla_data(nla);
		dst->len = sizeof(unsigned long);
	} else if (nla_len(nla) == sizeof(u32)) {
		dst->val = nla_get_u32(nla);
		dst->len = sizeof(u32);
	} else
		return -EINVAL;

	return 0;
}

static void meta_int_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	if (v->hdr.shift)
		dst->value >>= v->hdr.shift;

	if (v->val)
		dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->len == sizeof(unsigned long)) {
		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
			goto nla_put_failure;
	} else if (v->len == sizeof(u32)) {
		if (nla_put_u32(skb, tlv, v->val))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
	void	(*destroy)(struct meta_value *);
	int	(*compare)(struct meta_obj *, struct meta_obj *);
	int	(*change)(struct meta_value *, struct nlattr *);
	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
	int	(*dump)(struct sk_buff *, struct meta_value *, int);
};

static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		.destroy = meta_var_destroy,
		.compare = meta_var_compare,
		.change = meta_var_change,
		.apply_extras = meta_var_apply_extras,
		.dump = meta_var_dump
	},
	[TCF_META_TYPE_INT] = {
		.compare = meta_int_compare,
		.change = meta_int_change,
		.apply_extras = meta_int_apply_extras,
		.dump = meta_int_dump
	}
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
	return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
		    struct meta_value *v, struct meta_obj *dst)
{
	int err = 0;

	if (meta_id(v) == TCF_META_ID_VALUE) {
		dst->value = v->val;
		dst->len = v->len;
		return 0;
	}

	meta_ops(v)->get(skb, info, v, dst, &err);
	if (err < 0)
		return err;

	if (meta_type_ops(v)->apply_extras)
		meta_type_ops(v)->apply_extras(v, dst);

	return 0;
}

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	int r;
	struct meta_match *meta = (struct meta_match *) m->data;
	struct meta_obj l_value, r_value;

	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
		return 0;

	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

	switch (meta->lvalue.hdr.op) {
	case TCF_EM_OPND_EQ:
		return !r;
	case TCF_EM_OPND_LT:
		return r < 0;
	case TCF_EM_OPND_GT:
		return r > 0;
	}

	return 0;
}

static void meta_delete(struct meta_match *meta)
{
	if (meta) {
		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

		if (ops && ops->destroy) {
			ops->destroy(&meta->lvalue);
			ops->destroy(&meta->rvalue);
		}
	}

	kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
	if (nla) {
		if (nla_len(nla) == 0)
			return -EINVAL;

		return meta_type_ops(dst)->change(dst, nla);
	}

	return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
	return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct net *net, void *data, int len,
			  struct tcf_ematch *m)
{
	int err;
	struct nlattr *tb[TCA_EM_META_MAX + 1];
	struct tcf_meta_hdr *hdr;
	struct meta_match *meta = NULL;

	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
				   meta_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_EM_META_HDR] == NULL)
		goto errout;
	hdr = nla_data(tb[TCA_EM_META_HDR]);

	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
		goto errout;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (meta == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

	if (!meta_is_supported(&meta->lvalue) ||
	    !meta_is_supported(&meta->rvalue)) {
		err = -EOPNOTSUPP;
		goto errout;
	}

	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
		goto errout;

	m->datalen = sizeof(*meta);
	m->data = (unsigned long) meta;

	err = 0;
errout:
	if (err && meta)
		meta_delete(meta);
	return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
	if (m)
		meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
	struct meta_match *meta = (struct meta_match *) em->data;
	struct tcf_meta_hdr hdr;
	const struct meta_type_ops *ops;

	memset(&hdr, 0, sizeof(hdr));
	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
		goto nla_put_failure;

	ops = meta_type_ops(&meta->lvalue);
	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
	.kind = TCF_EM_META,
	.change = em_meta_change,
	.match = em_meta_match,
	.destroy = em_meta_destroy,
	.dump = em_meta_dump,
	.owner = THIS_MODULE,
	.link = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
	return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
	tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);