#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes the control and basic
	 * keys. That way we are able to avoid handling the lack of these in
	 * the fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
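/* Usage sketch (illustrative, not part of this file): a caller builds a
 * dissector by listing the keys it wants and where each dissected value
 * should land in its target container. The control and basic keys are
 * mandatory, as enforced above; the key arrays at the bottom of this file
 * (e.g. flow_keys_buf_dissector_keys) follow exactly this pattern.
 *
 *	static const struct flow_dissector_key my_keys[] = {	// hypothetical name
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *			.offset = offsetof(struct flow_keys, control),
 *		},
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_BASIC,
 *			.offset = offsetof(struct flow_keys, basic),
 *		},
 *	};
 *	static struct flow_dissector my_dissector;		// hypothetical name
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */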

/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
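/* Illustrative note: the returned __be32 packs both ports in network order,
 * source port first and destination port second, matching the on-wire layout
 * of TCP/UDP and the union in struct flow_dissector_key_ports. A caller can
 * split them through that union, e.g.:
 *
 *	struct flow_dissector_key_ports ports;	// local variable for the sketch
 *
 *	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	// ports.src / ports.dst now hold the __be16 source/destination port
 */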

static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
			>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
			>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
			>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}
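/* Worked example, using the MPLS_LS_* masks and shifts from <linux/mpls.h>:
 * a label stack entry is a single 32-bit word with the label in bits 31..12,
 * TC in bits 11..9, bottom-of-stack (S) in bit 8 and TTL in bits 7..0.
 * For entry == 0x0001e1ff this yields label = 30, TC = 0, S = 1 (bottom of
 * stack) and TTL = 255.
 */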

static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP; also check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
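/* Worked example for the offset arithmetic above: for a version 0 GRE
 * header with GRE_CSUM and GRE_KEY set, offset accumulates as
 * sizeof(struct gre_base_hdr) (4) + csum/reserved1 (4) + key (4) = 12,
 * so the encapsulated payload begins 12 bytes after the GRE header start.
 */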

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
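/* Note on the mask above: tcp_flag_word() covers the 32-bit word of the TCP
 * header that holds the data offset, reserved bits and flags. Taking the
 * first __be16 of it and masking with htons(0x0FFF) drops the 4-bit data
 * offset and keeps the low 12 bits: the reserved bits plus the flag bits
 * (NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN).
 */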

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: flags that control what is dissected and when to stop
 *
 * The function will try to retrieve individual keys into the target container
 * specified by @flow_dissector, from either the skbuff or a raw buffer
 * specified by the remaining parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	bool skip_vlan = false;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
			const struct dsa_device_ops *ops;
			int offset;

			ops = skb->dev->dsa_ptr->tag_ops;
			if (ops->flow_dissect &&
			    !ops->flow_dissect(skb, &proto, &offset)) {
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			fdret = FLOW_DISSECT_RET_OUT_GOOD;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			if (skip_vlan) {
				fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
				break;
			}
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen);
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

	key_control->thoff = (u16)nhoff;
out:
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
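/* Usage sketch (illustrative): dissecting an skb into a struct flow_keys
 * with one of the default dissectors declared at the bottom of this file.
 * __skb_get_hash_symmetric() below follows the same pattern.
 *
 *	struct flow_keys keys;
 *
 *	memset(&keys, 0, sizeof(keys));		// caller must zero the target
 *	if (__skb_flow_dissect(skb, &flow_keys_dissector, &keys,
 *			       NULL, 0, 0, 0, 0))
 *		// keys.basic, keys.addrs, keys.ports etc. are now filled in
 */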

static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}

static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
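/* Worked example: for a flow 10.0.0.2:80 -> 10.0.0.1:12345 the destination
 * address compares lower than the source address, so addresses and ports
 * are swapped; the reverse direction 10.0.0.1:12345 -> 10.0.0.2:80 is left
 * untouched. Both directions thus end up with identical keys, and therefore
 * identical hashes.
 */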

static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
		(struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);

/**
 * __skb_get_hash - calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
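/* Worked example for the TCP case above: with a data-offset nibble of 8,
 * the byte at poff + 12 reads 0x80, and (0x80 & 0xF0) >> 2 == 32, i.e. a
 * 32-byte TCP header (20 bytes of base header plus 12 bytes of options),
 * so the payload starts 32 bytes past the transport header.
 */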

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)fl6->flowlabel;
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	keys->addrs.v4addrs.src = fl4->saddr;
	keys->addrs.v4addrs.dst = fl4->daddr;
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys->ports.src = fl4->fl4_sport;
	keys->ports.dst = fl4->fl4_dport;
	keys->keyid.keyid = fl4->fl4_gre_key;
	keys->basic.ip_proto = fl4->flowi4_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

core_initcall(init_default_flow_dissectors);