// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

/* flow director ethtool support for iavf */

#include <linux/bitfield.h>
#include "iavf.h"

#define GTPU_PORT	2152
#define NAT_T_ESP_PORT	4500
#define PFCP_PORT	8805

static const struct in6_addr ipv6_addr_full_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static const struct in6_addr ipv6_addr_zero_mask = {
	.in6_u = {
		.u6_addr8 = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		}
	}
};

/**
 * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if all masks of packet fields are either full or empty. Returns
 * error on at least one partial mask.
 */
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
				  struct iavf_fdir_fltr *fltr)
{
	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_ver == 4) {
		if (fltr->ip_mask.v4_addrs.src_ip &&
		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.v4_addrs.dst_ip &&
		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
			goto partial_mask;
	} else if (fltr->ip_ver == 6) {
		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
			goto partial_mask;
	}

	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
		goto partial_mask;

	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
		goto partial_mask;

	if (fltr->ip_mask.l4_header &&
	    fltr->ip_mask.l4_header != htonl(U32_MAX))
		goto partial_mask;

	return 0;

partial_mask:
	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
	return -EOPNOTSUPP;
}

/**
 * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
 * @fltr: Flow Director filter data structure
 */
static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
{
	return sizeof(struct ethhdr) +
	       (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
	       sizeof(struct udphdr);
}
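
/*
 * Note on flex words: fltr->flex_words[].offset is a byte offset from the
 * start of the Ethernet frame.  The tunnel parsers below reject offsets that
 * fall inside the Ethernet/IP/UDP headers and subtract
 * iavf_pkt_udp_no_pay_len(), so the remaining offset is relative to the first
 * byte of the UDP payload, i.e. the start of the GTP-U/PFCP/NAT-T-ESP header.
 */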

/**
 * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the GTP-U protocol header is set successfully
 */
static int
iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_GTPU_HDR_TEID_OFFS0		4
#define IAVF_GTPU_HDR_TEID_OFFS1		6
#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS	10
#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK	0x00FF /* skip N_PDU */
/* PDU Session Container Extension Header (PSC) */
#define IAVF_GTPU_PSC_EXTHDR_TYPE		0x85
#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS	13
#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK		0x3F /* skip Type */
#define IAVF_GTPU_EH_QFI_IDX			1

		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_GTPU_HDR_TEID_OFFS0:
		case IAVF_GTPU_HDR_TEID_OFFS1: {
			__be16 *pay_word = (__be16 *)ghdr->buffer;

			pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
			}
			break;
		case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
			if ((fltr->flex_words[i].word &
			     IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) !=
						IAVF_GTPU_PSC_EXTHDR_TYPE)
				return -EOPNOTSUPP;
			if (!ehdr)
				ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
			VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
			break;
		case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
			if (!ehdr)
				return -EINVAL;
			ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] =
					fltr->flex_words[i].word &
					IAVF_GTPU_HDR_PSC_PDU_QFI_MASK;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}

/**
 * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the PFCP protocol header is set successfully
 */
static int
iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS	0
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
			hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}
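
/*
 * UDP-encapsulated ESP (NAT-T, RFC 3948) shares UDP port 4500 with IKE
 * traffic; IKE packets carry a four-byte zero "non-ESP marker" where the SPI
 * would sit, so an SPI of 0 cannot identify an ESP flow below.  The 32-bit
 * SPI match value is reassembled from two 16-bit flex words at payload
 * offsets 0 and 2.
 */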

/**
 * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the NAT-T-ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
			     struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	u32 spi = 0;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_NAT_T_ESP_SPI_OFFS0	0
#define IAVF_NAT_T_ESP_SPI_OFFS1	2
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_NAT_T_ESP_SPI_OFFS0:
			spi |= fltr->flex_words[i].word << 16;
			break;
		case IAVF_NAT_T_ESP_SPI_OFFS1:
			spi |= fltr->flex_words[i].word;
			break;
		default:
			return -EINVAL;
		}
	}

	if (!spi)
		return -EOPNOTSUPP; /* IKE header format with SPI 0 is not supported */

	*(__be32 *)hdr->buffer = htonl(spi);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}

/**
 * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP payload defined protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
				struct virtchnl_proto_hdrs *proto_hdrs)
{
	int err;

	switch (ntohs(fltr->ip_data.dst_port)) {
	case GTPU_PORT:
		err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
		break;
	case NAT_T_ESP_PORT:
		err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
		break;
	case PFCP_PORT:
		err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
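
/*
 * Each iavf_fill_fdir_*_hdr() helper below claims the next proto_hdrs entry
 * (proto_hdrs->count++), tags it with its protocol type, and copies only the
 * fields whose masks were validated as "full" above; the matching
 * VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT() tells the PF which fields of the header
 * buffer to match on.
 */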

/**
 * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv4 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct iphdr *iph = (struct iphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

	if (fltr->ip_mask.tos == U8_MAX) {
		iph->tos = fltr->ip_data.tos;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->protocol = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
	}

	if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
		iph->saddr = fltr->ip_data.v4_addrs.src_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	}

	if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
		iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	}

	return 0;
}

/**
 * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv6 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

	if (fltr->ip_mask.tclass == U8_MAX) {
		iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
		iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->nexthdr = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
	}

	return 0;
}

/**
 * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the TCP protocol header is set successfully
 */
static int
iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		tcph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		tcph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
	}

	return 0;
}

/**
 * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct udphdr *udph = (struct udphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		udph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		udph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
	}

	if (!fltr->flex_cnt)
		return 0;

	return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
}

/**
 * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the SCTP protocol header is set successfully
 */
static int
iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		sctph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		sctph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
	}

	return 0;
}

/**
 * iavf_fill_fdir_ah_hdr - fill the AH protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the AH protocol header is set successfully
 */
static int
iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		ah->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
	}

	return 0;
}

/**
 * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		esph->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
	}

	return 0;
}
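
/*
 * For the "other" IPv4/IPv6 flow types the only recognized L4 match is
 * L2TPv3 over IP (IP protocol 115): the first four bytes of the IP payload
 * carry the Session ID, which ethtool exposes as the "L4 bytes" value
 * (fltr->ip_data.l4_header).  Any other protocol match falls through to
 * -EOPNOTSUPP.
 */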

/**
 * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the L4 protocol header is set successfully
 */
static int
iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr;
	__be32 *l4_4_data;

	if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
		return 0;

	hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	l4_4_data = (__be32 *)hdr->buffer;

	/* L2TPv3 over IP with 'Session ID' */
	if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);

		*l4_4_data = fltr->ip_data.l4_header;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the Ethernet protocol header is set successfully
 */
static int
iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

	if (fltr->eth_mask.etype == htons(U16_MAX)) {
		if (fltr->eth_data.etype == htons(ETH_P_IP) ||
		    fltr->eth_data.etype == htons(ETH_P_IPV6))
			return -EOPNOTSUPP;

		ehdr->h_proto = fltr->eth_data.etype;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
	}

	return 0;
}

/**
 * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if the add Flow Director virtchnl message is filled successfully
 */
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
	struct virtchnl_proto_hdrs *proto_hdrs;
	int err;

	proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;

	err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
	if (err)
		return err;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_UDP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_ESP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_UDP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_ESP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		return err;

	vc_msg->vsi_id = adapter->vsi.id;
	vc_msg->rule_cfg.action_set.count = 1;
	vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
	vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;

	return 0;
}
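
/*
 * Illustrative example of the result: an IPv4/UDP GTP-U rule built above
 * ends up with proto_hdrs = { ETH, IPV4, UDP, GTPU_IP } plus an extra
 * GTPU_EH entry when a PDU Session Container flex word was supplied; the
 * UDP entry's field_selector is cleared so the PF keys on the tunnel header
 * rather than the UDP ports.
 */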

/**
 * iavf_fdir_flow_proto_name - get the flow protocol name
 * @flow_type: Flow Director filter flow type
 **/
static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
{
	switch (flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV6_TCP:
		return "TCP";
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
		return "UDP";
	case IAVF_FDIR_FLOW_IPV4_SCTP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		return "SCTP";
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV6_AH:
		return "AH";
	case IAVF_FDIR_FLOW_IPV4_ESP:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		return "ESP";
	case IAVF_FDIR_FLOW_IPV4_OTHER:
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		return "Other";
	case IAVF_FDIR_FLOW_NON_IP_L2:
		return "Ethernet";
	default:
		return NULL;
	}
}

/**
 * iavf_print_fdir_fltr
 * @adapter: adapter structure
 * @fltr: Flow Director filter to print
 *
 * Print the Flow Director filter
 **/
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);

	if (!proto)
		return;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV4_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
			 fltr->loc,
			 ntohs(fltr->eth_data.etype));
		break;
	default:
		break;
	}
}

/**
 * iavf_fdir_is_dup_fltr - test if filter is already in list
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns true if the filter is found in the list
 */
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *tmp;
	bool ret = false;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
		if (tmp->flow_type != fltr->flow_type)
			continue;

		if (!memcmp(&tmp->eth_data, &fltr->eth_data,
			    sizeof(fltr->eth_data)) &&
		    !memcmp(&tmp->ip_data, &fltr->ip_data,
			    sizeof(fltr->ip_data)) &&
		    !memcmp(&tmp->ext_data, &fltr->ext_data,
			    sizeof(fltr->ext_data))) {
			ret = true;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	return ret;
}

/**
 * iavf_find_fdir_fltr_by_loc - find filter with location
 * @adapter: pointer to the VF adapter structure
 * @loc: location to find.
 *
 * Returns pointer to Flow Director filter if found or null
 */
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
{
	struct iavf_fdir_fltr *rule;

	list_for_each_entry(rule, &adapter->fdir_list_head, list)
		if (rule->loc == loc)
			return rule;

	return NULL;
}

/**
 * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
 * @adapter: pointer to the VF adapter structure
 * @fltr: filter node to add to structure
 */
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *rule, *parent = NULL;

	list_for_each_entry(rule, &adapter->fdir_list_head, list) {
		if (rule->loc >= fltr->loc)
			break;
		parent = rule;
	}

	if (parent)
		list_add(&fltr->list, &parent->list);
	else
		list_add(&fltr->list, &adapter->fdir_list_head);
}