// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/if_hsr.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/ptp_classify.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_labels.h>
#endif
#include <linux/bpf-netns.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* Users should make sure that every key target offset
		 * fits within the bounds of an unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector, key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes the control and basic
	 * keys. That way we are able to avoid handling lack of these in
	 * the fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);

#ifdef CONFIG_BPF_SYSCALL
int flow_dissector_bpf_prog_attach_check(struct net *net,
					 struct bpf_prog *prog)
{
	enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;

	if (net == &init_net) {
		/* BPF flow dissector in the root namespace overrides
		 * any per-net-namespace one. When attaching to root,
		 * make sure we don't have any BPF program attached
		 * to the non-root namespaces.
		 */
		struct net *ns;

		for_each_net(ns) {
			if (ns == &init_net)
				continue;
			if (rcu_access_pointer(ns->bpf.run_array[type]))
				return -EEXIST;
		}
	} else {
		/* Make sure root flow dissector is not attached
		 * when attaching to the non-root namespace.
		 */
		if (rcu_access_pointer(init_net.bpf.run_array[type]))
			return -EEXIST;
	}

	return 0;
}
#endif /* CONFIG_BPF_SYSCALL */
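/* Usage sketch (illustrative only, mirroring the default dissectors at the
 * bottom of this file): a caller that needs the mandatory control/basic
 * keys plus ports would set up a private dissector like so:
 *
 *	static const struct flow_dissector_key my_keys[] = {
 *		{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *		  .offset = offsetof(struct flow_keys, control) },
 *		{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
 *		  .offset = offsetof(struct flow_keys, basic) },
 *		{ .key_id = FLOW_DISSECTOR_KEY_PORTS,
 *		  .offset = offsetof(struct flow_keys, ports) },
 *	};
 *	static struct flow_dissector my_dissector __read_mostly;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */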
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where
 * poff is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen,
					     &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
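/* The value returned above packs both ports in network order, matching the
 * layout of struct flow_dissector_key_ports. Illustrative use:
 *
 *	struct flow_dissector_key_ports key;
 *
 *	key.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	(key.src and key.dst now hold the source/destination port as __be16)
 */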
static bool icmp_has_id(u8 type)
{
	switch (type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
	case ICMP_TIMESTAMP:
	case ICMP_TIMESTAMPREPLY:
	case ICMPV6_ECHO_REQUEST:
	case ICMPV6_ECHO_REPLY:
		return true;
	}

	return false;
}

/**
 * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
 * @skb: sk_buff to extract from
 * @key_icmp: struct flow_dissector_key_icmp to fill
 * @data: raw buffer pointer to the packet
 * @thoff: offset to extract at
 * @hlen: packet header length
 */
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
			   struct flow_dissector_key_icmp *key_icmp,
			   const void *data, int thoff, int hlen)
{
	struct icmphdr *ih, _ih;

	ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
	if (!ih)
		return;

	key_icmp->type = ih->type;
	key_icmp->code = ih->code;

	/* As we use 0 to signal that the Id field is not present,
	 * avoid confusion with packets without such field
	 */
	if (icmp_has_id(ih->type))
		key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
	else
		key_icmp->id = 0;
}
EXPORT_SYMBOL(skb_flow_get_icmp_tci);

/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
 * using skb_flow_get_icmp_tci().
 */
static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, const void *data,
				    int thoff, int hlen)
{
	struct flow_dissector_key_icmp *key_icmp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
		return;

	key_icmp = skb_flow_dissector_target(flow_dissector,
					     FLOW_DISSECTOR_KEY_ICMP,
					     target_container);

	skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_meta *meta;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
		return;

	meta = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_META,
					 target_container);
	meta->ingress_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(skb_flow_dissect_meta);

static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container, u16 *ctinfo_map,
		    size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct flow_dissector_key_ct *key;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_labels *cl;
	struct nf_conn *ct;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
		return;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct && !post_ct)
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_CT,
					target_container);

	if (!ct) {
		key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
				TCA_FLOWER_KEY_CT_FLAGS_INVALID;
		key->ct_zone = zone;
		return;
	}

	if (ctinfo < mapsize)
		key->ct_state = ctinfo_map[ctinfo];
#if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
	key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	key->ct_mark = ct->mark;
#endif

	cl = nf_ct_labels_find(ct);
	if (cl)
		memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
#endif /* CONFIG_NF_CONNTRACK */
}
EXPORT_SYMBOL(skb_flow_dissect_ct);
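/* Note on @ctinfo_map (illustrative sketch): it is a caller-supplied table,
 * indexed by enum ip_conntrack_info, translating conntrack state into
 * TCA_FLOWER_KEY_CT_FLAGS_* bits, e.g.:
 *
 *	static const u16 my_ctinfo_map[] = {
 *		[IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *				      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 *		[IP_CT_NEW]	    = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *				      TCA_FLOWER_KEY_CT_FLAGS_NEW,
 *	};
 */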
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IP) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_OPTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *ip;

		ip = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target_container);
		ip->tos = key->tos;
		ip->ttl = key->ttl;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_dissector_key_enc_opts *enc_opt;

		enc_opt = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ENC_OPTS,
						    target_container);

		if (info->options_len) {
			enc_opt->len = info->options_len;
			ip_tunnel_info_opts_get(enc_opt->data, info);
			enc_opt->dst_opt_type = info->key.tun_flags &
						TUNNEL_OPTIONS_PRESENT;
		}
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);

void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_hash *key;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_HASH,
					target_container);

	key->hash = skb_get_hash_raw(skb);
}
EXPORT_SYMBOL(skb_flow_dissect_hash);
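/* Dissect one MPLS label stack entry (LSE) at @nhoff. Fills the
 * FLOW_DISSECTOR_KEY_MPLS key for up to FLOW_DIS_MPLS_MAX entries and, when
 * the previous label was the entropy label indicator, the
 * FLOW_DISSECTOR_KEY_MPLS_ENTROPY key. Returns FLOW_DISSECT_RET_PROTO_AGAIN
 * until the bottom-of-stack bit is seen, so the caller advances through the
 * stack one LSE at a time.
 */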
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data, int nhoff,
			int hlen, int lse_index, bool *entropy_label)
{
	struct mpls_label *hdr, _hdr;
	u32 entry, label, bos;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	if (lse_index >= FLOW_DIS_MPLS_MAX)
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr->entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
	bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;
		struct flow_dissector_mpls_lse *lse;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		lse = &key_mpls->ls[lse_index];

		lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
		lse->mpls_bos = bos;
		lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
		lse->mpls_label = label;
		dissector_set_mpls_lse(key_mpls, lse_index);
	}

	if (*entropy_label &&
	    dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
		struct flow_dissector_key_keyid *key_keyid;

		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = cpu_to_be32(label);
	}

	*entropy_label = label == MPLS_LABEL_ENTROPY;

	return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
}
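/* Dissect an Ethernet/IPv4 ARP or RARP header into FLOW_DISSECTOR_KEY_ARP.
 * Anything other than Ethernet hardware addresses with IPv4 protocol
 * addresses, or an opcode other than REQUEST/REPLY, is rejected as bad.
 */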
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
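/* Dissect a GRE header and advance *p_nhoff past it so that dissection can
 * continue with the encapsulated protocol. Only version 0 (optionally
 * carrying transparent Ethernet bridging) and version 1 (PPTP, whose
 * payload is PPP) are handled; routed GRE and unknown versions stop
 * dissection.
 */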
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof_field(struct gre_full_hdr, csum) +
			  sizeof_field(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof_field(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof_field(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof_field(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissector's control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed, because they contain
 * an inner Ethernet header and are usually followed by the actual network
 * header. This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when the dissector should stop after the
 * encapsulation, otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  const void *data, __be16 *p_proto, int *p_nhoff,
			  int hlen, unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	/* Keep the low 12 bits of the flags word: the flag bits plus the
	 * reserved bits, masking out the 4-bit data offset.
	 */
	key_tcp->flags = (*(__be16 *)&tcp_flag_word(th) & htons(0x0FFF));
}
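/* Store the L4 ports in whichever ports key the dissector was built with;
 * FLOW_DISSECTOR_KEY_PORTS takes precedence over
 * FLOW_DISSECTOR_KEY_PORTS_RANGE when both are present.
 */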
static void
__skb_flow_dissect_ports(const struct sk_buff *skb,
			 struct flow_dissector *flow_dissector,
			 void *target_container, const void *data,
			 int nhoff, u8 ip_proto, int hlen)
{
	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
	struct flow_dissector_key_ports *key_ports;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;

	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
		return;

	key_ports = skb_flow_dissector_target(flow_dissector,
					      dissector_ports,
					      target_container);
	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						data, hlen);
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
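/* Translate the bpf_flow_keys filled in by a BPF flow-dissector program
 * into the caller's flow_dissector/target_container layout, copying only
 * the keys the dissector was built with.
 */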
static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_ports *key_ports = NULL;
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs.src));
		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
		       sizeof(key_addrs->v6addrs.dst));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
						      target_container);

	if (key_ports) {
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
		key_tags = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_FLOW_LABEL,
						     target_container);
		key_tags->flow_label = ntohl(flow_keys->flow_label);
	}
}

bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct bpf_flow_keys *flow_keys = ctx->flow_keys;
	u32 result;

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	flow_keys->n_proto = proto;
	flow_keys->nhoff = nhoff;
	flow_keys->thoff = flow_keys->nhoff;

	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
		     (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
	flow_keys->flags = flags;

	result = bpf_prog_run_pin_on_cpu(prog, ctx);

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, hlen);

	return result == BPF_OK;
}

static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr)
{
	return hdr->ver == 1 && hdr->type == 1 && hdr->code == 0;
}
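/* Usage sketch (illustrative): most in-kernel callers do not invoke
 * __skb_flow_dissect() directly but go through one of the inline wrappers
 * declared in <linux/skbuff.h>, e.g.:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		(keys.basic, keys.addrs, keys.ports, ... are now filled)
 */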
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @net: associated network namespace, derived from @skb if NULL
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: flags that control the dissection process, e.g.
 *         FLOW_DISSECTOR_F_STOP_AT_ENCAP.
 *
 * The function will try to retrieve individual keys into the target
 * specified by @flow_dissector, from either the skbuff or a raw buffer
 * described by the remaining parameters.
 *
 * Caller must take care of zeroing the target container memory.
 */
bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	bool mpls_el = false;
	int mpls_lse = 0;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
			     proto == htons(ETH_P_XDSA))) {
			const struct dsa_device_ops *ops;
			int offset = 0;

			ops = skb->dev->dsa_ptr->tag_ops;
			/* Only DSA header taggers break flow dissection */
			if (ops->needed_headroom) {
				if (ops->flow_dissect)
					ops->flow_dissect(skb, &proto, &offset);
				else
					dsa_tag_generic_flow_dissect(skb,
								     &proto,
								     &offset);
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that the control key
	 * will always be present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that the basic key
	 * will always be present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		if (!net) {
			if (skb->dev)
				net = dev_net(skb->dev);
			else if (skb->sk)
				net = sock_net(skb->sk);
		}
	}

	WARN_ON_ONCE(!net);
	if (net) {
		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
		struct bpf_prog_array *run_array;

		rcu_read_lock();
		run_array = rcu_dereference(init_net.bpf.run_array[type]);
		if (!run_array)
			run_array = rcu_dereference(net->bpf.run_array[type]);

		if (run_array) {
			struct bpf_flow_keys flow_keys;
			struct bpf_flow_dissector ctx = {
				.flow_keys = &flow_keys,
				.data = data,
				.data_end = data + hlen,
			};
			__be16 n_proto = proto;
			struct bpf_prog *prog;

			if (skb) {
				ctx.skb = skb;
				/* we can't use 'proto' in the skb case
				 * because it might be set to skb->vlan_proto
				 * which has been pulled from the data
				 */
				n_proto = skb->protocol;
			}

			prog = READ_ONCE(run_array->items[0].prog);
			ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
					       hlen, flags);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
		struct flow_dissector_key_num_of_vlans *key_num_of_vlans;

		key_num_of_vlans = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							     target_container);
		key_num_of_vlans->num_of_vlans = 0;
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data,
					   hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs.src, &iph->saddr,
			       sizeof(key_addrs->v4addrs.src));
			memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v4addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data,
					   hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs.src, &iph->saddr,
			       sizeof(key_addrs->v6addrs.src));
			memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v6addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		break;
	}
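	/* VLAN tags: the first tag may live in skb metadata rather than in
	 * the packet data. The outermost tag is reported as
	 * FLOW_DISSECTOR_KEY_VLAN, a second one as FLOW_DISSECTOR_KEY_CVLAN;
	 * any deeper tags are parsed over without being recorded.
	 */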
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
			struct flow_dissector_key_num_of_vlans *key_nvs;

			key_nvs = skb_flow_dissector_target(flow_dissector,
							    FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							    target_container);
			key_nvs->num_of_vlans++;
		}

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
			dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
		} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
			dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
		} else {
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		}

		if (dissector_uses_key(flow_dissector, dissector_vlan)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     dissector_vlan,
							     target_container);

			if (!vlan) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
			key_vlan->vlan_tpid = saved_vlan_tpid;
			key_vlan->vlan_eth_type = proto;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
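	/* PPPoE session packets carry a PPP protocol field that may be
	 * compressed to a single byte: uncompressed protocol numbers have an
	 * even high octet, so an odd leading octet signals compression, e.g.
	 * a lone 0x21 byte stands for PPP_IP (0x0021). The case below
	 * expands this before mapping PPP protocols to ETH_P_* values.
	 */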
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		u16 ppp_proto;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (!is_pppoe_ses_hdr_valid(&hdr->hdr)) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		/* least significant bit of the most significant octet
		 * indicates if protocol field was compressed
		 */
		ppp_proto = ntohs(hdr->proto);
		if (ppp_proto & 0x0100) {
			ppp_proto = ppp_proto >> 8;
			nhoff += PPPOE_SES_HLEN - 1;
		} else {
			nhoff += PPPOE_SES_HLEN;
		}

		if (ppp_proto == PPP_IP) {
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		} else if (ppp_proto == PPP_IPV6) {
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		} else if (ppp_proto == PPP_MPLS_UC) {
			proto = htons(ETH_P_MPLS_UC);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		} else if (ppp_proto == PPP_MPLS_MC) {
			proto = htons(ETH_P_MPLS_MC);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		} else if (ppp_proto_is_valid(ppp_proto)) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
		} else {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_PPPOE)) {
			struct flow_dissector_key_pppoe *key_pppoe;

			key_pppoe = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_PPPOE,
							      target_container);
			key_pppoe->session_id = hdr->hdr.sid;
			key_pppoe->ppp_proto = htons(ppp_proto);
			key_pppoe->type = htons(ETH_P_PPP_SES);
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen, mpls_lse,
						&mpls_el);
		nhoff += sizeof(struct mpls_label);
		mpls_lse++;
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	case htons(ETH_P_1588): {
		struct ptp_header *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += ntohs(hdr->message_length);
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_PRP):
	case htons(ETH_P_HSR): {
		struct hsr_tag *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		proto = hdr->encap_proto;
		nhoff += HSR_HLEN;
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}
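	/* Second parsing stage: walk the IP protocol / IPv6 extension header
	 * chain identified above. FLOW_DISSECT_RET_IPPROTO_AGAIN loops back
	 * here, bounded by the same MAX_FLOW_DISSECT_HDRS budget as the
	 * protocol loop.
	 */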
ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		__skb_flow_dissect_icmp(skb, flow_dissector, target_container,
					data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
		__skb_flow_dissect_ports(skb, flow_dissector, target_container,
					 data, nhoff, ip_proto, hlen);

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
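/* Flow hashing: the remainder of this file turns dissected flow keys into
 * packet hashes. A boot-time random siphash key keeps hash values stable
 * for the uptime of the machine without being predictable across boots.
 */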
static siphash_aligned_key_t hashrnd;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static const void *flow_keys_hash_start(const struct flow_keys *flow)
{
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
	return &flow->FLOW_KEYS_HASH_START_FIELD;
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return sizeof(*flow) - diff;
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

/* Sort the source and destination IP and the ports,
 * to have a consistent hash within the two directions
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		if ((__force u32)keys->addrs.v4addrs.dst <
		    (__force u32)keys->addrs.v4addrs.src)
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);

		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if (addr_diff < 0) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
		}
		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
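/* Illustrative consequence of the sorting above: for a TCP flow
 * 10.0.0.1:40000 -> 10.0.0.2:80 and its reply 10.0.0.2:80 -> 10.0.0.1:40000,
 * both directions canonicalize to the same (addr, port) ordering, so
 * __flow_hash_from_keys() yields the same hash either way.
 */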
static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
					const siphash_key_t *keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = siphash(flow_keys_hash_start(keys),
		       flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, &hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  const siphash_key_t *keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
		(struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
			   &keys, NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, &hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);

/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. It sets the hash in skb to a non-zero value
 * on success; zero indicates no valid hash. It also sets l4_hash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, &hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
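/* Compute the payload offset of a packet whose basic flow keys have already
 * been dissected: start from the transport header offset in @keys and step
 * over the L4 header for the protocols known here.
 */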
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, which uses it to
 * dynamically truncate packets so that only the headers, not the actual
 * payload, need to be pushed to user space for analysis.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					      NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};
static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);