1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 /* Copyright (c) 2018 Mellanox Technologies. */ 3 4 #include <net/vxlan.h> 5 #include <net/gre.h> 6 #include <net/geneve.h> 7 #include "en/tc_tun.h" 8 #include "en_tc.h" 9 10 struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev) 11 { 12 if (netif_is_vxlan(tunnel_dev)) 13 return &vxlan_tunnel; 14 else if (netif_is_geneve(tunnel_dev)) 15 return &geneve_tunnel; 16 else if (netif_is_gretap(tunnel_dev) || 17 netif_is_ip6gretap(tunnel_dev)) 18 return &gre_tunnel; 19 else 20 return NULL; 21 } 22 23 static int get_route_and_out_devs(struct mlx5e_priv *priv, 24 struct net_device *dev, 25 struct net_device **route_dev, 26 struct net_device **out_dev) 27 { 28 struct net_device *uplink_dev, *uplink_upper, *real_dev; 29 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 30 bool dst_is_lag_dev; 31 32 real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev; 33 uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); 34 35 rcu_read_lock(); 36 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); 37 /* mlx5_lag_is_sriov() is a blocking function which can't be called 38 * while holding rcu read lock. Take the net_device for correctness 39 * sake. 40 */ 41 if (uplink_upper) 42 dev_hold(uplink_upper); 43 rcu_read_unlock(); 44 45 dst_is_lag_dev = (uplink_upper && 46 netif_is_lag_master(uplink_upper) && 47 real_dev == uplink_upper && 48 mlx5_lag_is_sriov(priv->mdev)); 49 if (uplink_upper) 50 dev_put(uplink_upper); 51 52 /* if the egress device isn't on the same HW e-switch or 53 * it's a LAG device, use the uplink 54 */ 55 *route_dev = dev; 56 if (!netdev_port_same_parent_id(priv->netdev, real_dev) || 57 dst_is_lag_dev || is_vlan_dev(*route_dev)) 58 *out_dev = uplink_dev; 59 else if (mlx5e_eswitch_rep(dev) && 60 mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) 61 *out_dev = *route_dev; 62 else 63 return -EOPNOTSUPP; 64 65 if (!(mlx5e_eswitch_rep(*out_dev) && 66 mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) 67 return -EOPNOTSUPP; 68 69 if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev) 70 return -EOPNOTSUPP; 71 72 return 0; 73 } 74 75 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, 76 struct net_device *mirred_dev, 77 struct net_device **out_dev, 78 struct net_device **route_dev, 79 struct flowi4 *fl4, 80 struct neighbour **out_n, 81 u8 *out_ttl) 82 { 83 struct neighbour *n; 84 struct rtable *rt; 85 86 #if IS_ENABLED(CONFIG_INET) 87 struct mlx5_core_dev *mdev = priv->mdev; 88 struct net_device *uplink_dev; 89 int ret; 90 91 if (mlx5_lag_is_multipath(mdev)) { 92 struct mlx5_eswitch *esw = mdev->priv.eswitch; 93 94 uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); 95 fl4->flowi4_oif = uplink_dev->ifindex; 96 } 97 98 rt = ip_route_output_key(dev_net(mirred_dev), fl4); 99 ret = PTR_ERR_OR_ZERO(rt); 100 if (ret) 101 return ret; 102 103 if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) { 104 ip_rt_put(rt); 105 return -ENETUNREACH; 106 } 107 #else 108 return -EOPNOTSUPP; 109 #endif 110 111 ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev); 112 if (ret < 0) { 113 ip_rt_put(rt); 114 return ret; 115 } 116 117 if (!(*out_ttl)) 118 *out_ttl = ip4_dst_hoplimit(&rt->dst); 119 n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 120 ip_rt_put(rt); 121 if (!n) 122 return -ENOMEM; 123 124 *out_n = n; 125 return 0; 126 } 127 128 static const char *mlx5e_netdev_kind(struct net_device *dev) 129 { 130 if (dev->rtnl_link_ops) 131 return dev->rtnl_link_ops->kind; 132 else 133 return "unknown"; 134 } 135 136 static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, 137 struct mlx5e_encap_entry *e) 138 { 139 if (!e->tunnel) { 140 pr_warn("mlx5: Cannot generate tunnel header for this tunnel\n"); 141 return -EOPNOTSUPP; 142 } 143 144 return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e); 145 } 146 147 static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev, 148 struct mlx5e_encap_entry *e, 149 u16 proto) 150 { 151 struct ethhdr *eth = (struct ethhdr *)buf; 152 char *ip; 153 154 ether_addr_copy(eth->h_dest, e->h_dest); 155 ether_addr_copy(eth->h_source, dev->dev_addr); 156 if (is_vlan_dev(dev)) { 157 struct vlan_hdr *vlan = (struct vlan_hdr *) 158 ((char *)eth + ETH_HLEN); 159 ip = (char *)vlan + VLAN_HLEN; 160 eth->h_proto = vlan_dev_vlan_proto(dev); 161 vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev)); 162 vlan->h_vlan_encapsulated_proto = htons(proto); 163 } else { 164 eth->h_proto = htons(proto); 165 ip = (char *)eth + ETH_HLEN; 166 } 167 168 return ip; 169 } 170 171 int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, 172 struct net_device *mirred_dev, 173 struct mlx5e_encap_entry *e) 174 { 175 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 176 const struct ip_tunnel_key *tun_key = &e->tun_info->key; 177 struct net_device *out_dev, *route_dev; 178 struct flowi4 fl4 = {}; 179 struct neighbour *n; 180 int ipv4_encap_size; 181 char *encap_header; 182 u8 nud_state, ttl; 183 struct iphdr *ip; 184 int err; 185 186 /* add the IP fields */ 187 fl4.flowi4_tos = tun_key->tos; 188 fl4.daddr = tun_key->u.ipv4.dst; 189 fl4.saddr = tun_key->u.ipv4.src; 190 ttl = tun_key->ttl; 191 192 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev, 193 &fl4, &n, &ttl); 194 if (err) 195 return err; 196 197 ipv4_encap_size = 198 (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + 199 sizeof(struct iphdr) + 200 e->tunnel->calc_hlen(e); 201 202 if (max_encap_size < ipv4_encap_size) { 203 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 204 ipv4_encap_size, max_encap_size); 205 err = -EOPNOTSUPP; 206 goto release_neigh; 207 } 208 209 encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); 210 if (!encap_header) { 211 err = -ENOMEM; 212 goto release_neigh; 213 } 214 215 /* used by mlx5e_detach_encap to lookup a neigh hash table 216 * entry in the neigh hash table when a user deletes a rule 217 */ 218 e->m_neigh.dev = n->dev; 219 e->m_neigh.family = n->ops->family; 220 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 221 e->out_dev = out_dev; 222 e->route_dev = route_dev; 223 224 /* It's important to add the neigh to the hash table before checking 225 * the neigh validity state. So if we'll get a notification, in case the 226 * neigh changes it's validity state, we would find the relevant neigh 227 * in the hash. 228 */ 229 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 230 if (err) 231 goto free_encap; 232 233 read_lock_bh(&n->lock); 234 nud_state = n->nud_state; 235 ether_addr_copy(e->h_dest, n->ha); 236 read_unlock_bh(&n->lock); 237 238 /* add ethernet header */ 239 ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, 240 ETH_P_IP); 241 242 /* add ip header */ 243 ip->tos = tun_key->tos; 244 ip->version = 0x4; 245 ip->ihl = 0x5; 246 ip->ttl = ttl; 247 ip->daddr = fl4.daddr; 248 ip->saddr = fl4.saddr; 249 250 /* add tunneling protocol header */ 251 err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), 252 &ip->protocol, e); 253 if (err) 254 goto destroy_neigh_entry; 255 256 e->encap_size = ipv4_encap_size; 257 e->encap_header = encap_header; 258 259 if (!(nud_state & NUD_VALID)) { 260 neigh_event_send(n, NULL); 261 /* the encap entry will be made valid on neigh update event 262 * and not used before that. 263 */ 264 goto release_neigh; 265 } 266 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, 267 e->reformat_type, 268 ipv4_encap_size, encap_header, 269 MLX5_FLOW_NAMESPACE_FDB); 270 if (IS_ERR(e->pkt_reformat)) { 271 err = PTR_ERR(e->pkt_reformat); 272 goto destroy_neigh_entry; 273 } 274 275 e->flags |= MLX5_ENCAP_ENTRY_VALID; 276 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 277 neigh_release(n); 278 return err; 279 280 destroy_neigh_entry: 281 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 282 free_encap: 283 kfree(encap_header); 284 release_neigh: 285 neigh_release(n); 286 return err; 287 } 288 289 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) 290 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, 291 struct net_device *mirred_dev, 292 struct net_device **out_dev, 293 struct net_device **route_dev, 294 struct flowi6 *fl6, 295 struct neighbour **out_n, 296 u8 *out_ttl) 297 { 298 struct dst_entry *dst; 299 struct neighbour *n; 300 301 int ret; 302 303 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, 304 NULL); 305 if (IS_ERR(dst)) 306 return PTR_ERR(dst); 307 308 if (!(*out_ttl)) 309 *out_ttl = ip6_dst_hoplimit(dst); 310 311 ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); 312 if (ret < 0) { 313 dst_release(dst); 314 return ret; 315 } 316 317 n = dst_neigh_lookup(dst, &fl6->daddr); 318 dst_release(dst); 319 if (!n) 320 return -ENOMEM; 321 322 *out_n = n; 323 return 0; 324 } 325 326 int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, 327 struct net_device *mirred_dev, 328 struct mlx5e_encap_entry *e) 329 { 330 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 331 const struct ip_tunnel_key *tun_key = &e->tun_info->key; 332 struct net_device *out_dev, *route_dev; 333 struct flowi6 fl6 = {}; 334 struct ipv6hdr *ip6h; 335 struct neighbour *n = NULL; 336 int ipv6_encap_size; 337 char *encap_header; 338 u8 nud_state, ttl; 339 int err; 340 341 ttl = tun_key->ttl; 342 343 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); 344 fl6.daddr = tun_key->u.ipv6.dst; 345 fl6.saddr = tun_key->u.ipv6.src; 346 347 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev, 348 &fl6, &n, &ttl); 349 if (err) 350 return err; 351 352 ipv6_encap_size = 353 (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + 354 sizeof(struct ipv6hdr) + 355 e->tunnel->calc_hlen(e); 356 357 if (max_encap_size < ipv6_encap_size) { 358 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 359 ipv6_encap_size, max_encap_size); 360 err = -EOPNOTSUPP; 361 goto release_neigh; 362 } 363 364 encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); 365 if (!encap_header) { 366 err = -ENOMEM; 367 goto release_neigh; 368 } 369 370 /* used by mlx5e_detach_encap to lookup a neigh hash table 371 * entry in the neigh hash table when a user deletes a rule 372 */ 373 e->m_neigh.dev = n->dev; 374 e->m_neigh.family = n->ops->family; 375 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 376 e->out_dev = out_dev; 377 e->route_dev = route_dev; 378 379 /* It's importent to add the neigh to the hash table before checking 380 * the neigh validity state. So if we'll get a notification, in case the 381 * neigh changes it's validity state, we would find the relevant neigh 382 * in the hash. 383 */ 384 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 385 if (err) 386 goto free_encap; 387 388 read_lock_bh(&n->lock); 389 nud_state = n->nud_state; 390 ether_addr_copy(e->h_dest, n->ha); 391 read_unlock_bh(&n->lock); 392 393 /* add ethernet header */ 394 ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, 395 ETH_P_IPV6); 396 397 /* add ip header */ 398 ip6_flow_hdr(ip6h, tun_key->tos, 0); 399 /* the HW fills up ipv6 payload len */ 400 ip6h->hop_limit = ttl; 401 ip6h->daddr = fl6.daddr; 402 ip6h->saddr = fl6.saddr; 403 404 /* add tunneling protocol header */ 405 err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), 406 &ip6h->nexthdr, e); 407 if (err) 408 goto destroy_neigh_entry; 409 410 e->encap_size = ipv6_encap_size; 411 e->encap_header = encap_header; 412 413 if (!(nud_state & NUD_VALID)) { 414 neigh_event_send(n, NULL); 415 /* the encap entry will be made valid on neigh update event 416 * and not used before that. 417 */ 418 goto release_neigh; 419 } 420 421 e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, 422 e->reformat_type, 423 ipv6_encap_size, encap_header, 424 MLX5_FLOW_NAMESPACE_FDB); 425 if (IS_ERR(e->pkt_reformat)) { 426 err = PTR_ERR(e->pkt_reformat); 427 goto destroy_neigh_entry; 428 } 429 430 e->flags |= MLX5_ENCAP_ENTRY_VALID; 431 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 432 neigh_release(n); 433 return err; 434 435 destroy_neigh_entry: 436 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 437 free_encap: 438 kfree(encap_header); 439 release_neigh: 440 neigh_release(n); 441 return err; 442 } 443 #endif 444 445 bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, 446 struct net_device *netdev) 447 { 448 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(netdev); 449 450 if (tunnel && tunnel->can_offload(priv)) 451 return true; 452 else 453 return false; 454 } 455 456 int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, 457 struct mlx5e_priv *priv, 458 struct mlx5e_encap_entry *e, 459 struct netlink_ext_ack *extack) 460 { 461 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(tunnel_dev); 462 463 if (!tunnel) { 464 e->reformat_type = -1; 465 return -EOPNOTSUPP; 466 } 467 468 return tunnel->init_encap_attr(tunnel_dev, priv, e, extack); 469 } 470 471 int mlx5e_tc_tun_parse(struct net_device *filter_dev, 472 struct mlx5e_priv *priv, 473 struct mlx5_flow_spec *spec, 474 struct flow_cls_offload *f, 475 u8 *match_level) 476 { 477 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); 478 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 479 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 480 outer_headers); 481 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 482 outer_headers); 483 struct netlink_ext_ack *extack = f->common.extack; 484 int err = 0; 485 486 if (!tunnel) { 487 netdev_warn(priv->netdev, 488 "decapsulation offload is not supported for %s net device\n", 489 mlx5e_netdev_kind(filter_dev)); 490 err = -EOPNOTSUPP; 491 goto out; 492 } 493 494 *match_level = tunnel->match_level; 495 496 if (tunnel->parse_udp_ports) { 497 err = tunnel->parse_udp_ports(priv, spec, f, 498 headers_c, headers_v); 499 if (err) 500 goto out; 501 } 502 503 if (tunnel->parse_tunnel) { 504 err = tunnel->parse_tunnel(priv, spec, f, 505 headers_c, headers_v); 506 if (err) 507 goto out; 508 } 509 510 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 511 struct flow_match_control match; 512 u16 addr_type; 513 514 flow_rule_match_enc_control(rule, &match); 515 addr_type = match.key->addr_type; 516 517 /* For tunnel addr_type used same key id`s as for non-tunnel */ 518 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 519 struct flow_match_ipv4_addrs match; 520 521 flow_rule_match_enc_ipv4_addrs(rule, &match); 522 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 523 src_ipv4_src_ipv6.ipv4_layout.ipv4, 524 ntohl(match.mask->src)); 525 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 526 src_ipv4_src_ipv6.ipv4_layout.ipv4, 527 ntohl(match.key->src)); 528 529 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 530 dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 531 ntohl(match.mask->dst)); 532 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 533 dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 534 ntohl(match.key->dst)); 535 536 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 537 ethertype); 538 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 539 ETH_P_IP); 540 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 541 struct flow_match_ipv6_addrs match; 542 543 flow_rule_match_enc_ipv6_addrs(rule, &match); 544 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 545 src_ipv4_src_ipv6.ipv6_layout.ipv6), 546 &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 547 ipv6)); 548 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 549 src_ipv4_src_ipv6.ipv6_layout.ipv6), 550 &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 551 ipv6)); 552 553 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 554 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 555 &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 556 ipv6)); 557 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 558 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 559 &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 560 ipv6)); 561 562 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 563 ethertype); 564 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 565 ETH_P_IPV6); 566 } 567 } 568 569 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { 570 struct flow_match_ip match; 571 572 flow_rule_match_enc_ip(rule, &match); 573 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 574 match.mask->tos & 0x3); 575 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 576 match.key->tos & 0x3); 577 578 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 579 match.mask->tos >> 2); 580 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 581 match.key->tos >> 2); 582 583 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 584 match.mask->ttl); 585 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 586 match.key->ttl); 587 588 if (match.mask->ttl && 589 !MLX5_CAP_ESW_FLOWTABLE_FDB 590 (priv->mdev, 591 ft_field_support.outer_ipv4_ttl)) { 592 NL_SET_ERR_MSG_MOD(extack, 593 "Matching on TTL is not supported"); 594 err = -EOPNOTSUPP; 595 goto out; 596 } 597 } 598 599 /* Enforce DMAC when offloading incoming tunneled flows. 600 * Flow counters require a match on the DMAC. 601 */ 602 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 603 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 604 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 605 dmac_47_16), priv->netdev->dev_addr); 606 607 /* let software handle IP fragments */ 608 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 609 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 610 611 return 0; 612 613 out: 614 return err; 615 } 616 617 int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv, 618 struct mlx5_flow_spec *spec, 619 struct flow_cls_offload *f, 620 void *headers_c, 621 void *headers_v) 622 { 623 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 624 struct netlink_ext_ack *extack = f->common.extack; 625 struct flow_match_ports enc_ports; 626 627 /* Full udp dst port must be given */ 628 629 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { 630 NL_SET_ERR_MSG_MOD(extack, 631 "UDP tunnel decap filter must include enc_dst_port condition"); 632 netdev_warn(priv->netdev, 633 "UDP tunnel decap filter must include enc_dst_port condition\n"); 634 return -EOPNOTSUPP; 635 } 636 637 flow_rule_match_enc_ports(rule, &enc_ports); 638 639 if (memchr_inv(&enc_ports.mask->dst, 0xff, 640 sizeof(enc_ports.mask->dst))) { 641 NL_SET_ERR_MSG_MOD(extack, 642 "UDP tunnel decap filter must match enc_dst_port fully"); 643 netdev_warn(priv->netdev, 644 "UDP tunnel decap filter must match enc_dst_port fully\n"); 645 return -EOPNOTSUPP; 646 } 647 648 /* match on UDP protocol and dst port number */ 649 650 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); 651 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 652 653 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 654 ntohs(enc_ports.mask->dst)); 655 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 656 ntohs(enc_ports.key->dst)); 657 658 /* UDP src port on outer header is generated by HW, 659 * so it is probably a bad idea to request matching it. 660 * Nonetheless, it is allowed. 661 */ 662 663 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 664 ntohs(enc_ports.mask->src)); 665 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 666 ntohs(enc_ports.key->src)); 667 668 return 0; 669 } 670