1e8f887acSAmir Vadai /* 2e8f887acSAmir Vadai * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3e8f887acSAmir Vadai * 4e8f887acSAmir Vadai * This software is available to you under a choice of one of two 5e8f887acSAmir Vadai * licenses. You may choose to be licensed under the terms of the GNU 6e8f887acSAmir Vadai * General Public License (GPL) Version 2, available from the file 7e8f887acSAmir Vadai * COPYING in the main directory of this source tree, or the 8e8f887acSAmir Vadai * OpenIB.org BSD license below: 9e8f887acSAmir Vadai * 10e8f887acSAmir Vadai * Redistribution and use in source and binary forms, with or 11e8f887acSAmir Vadai * without modification, are permitted provided that the following 12e8f887acSAmir Vadai * conditions are met: 13e8f887acSAmir Vadai * 14e8f887acSAmir Vadai * - Redistributions of source code must retain the above 15e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 16e8f887acSAmir Vadai * disclaimer. 17e8f887acSAmir Vadai * 18e8f887acSAmir Vadai * - Redistributions in binary form must reproduce the above 19e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 20e8f887acSAmir Vadai * disclaimer in the documentation and/or other materials 21e8f887acSAmir Vadai * provided with the distribution. 22e8f887acSAmir Vadai * 23e8f887acSAmir Vadai * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f887acSAmir Vadai * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f887acSAmir Vadai * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f887acSAmir Vadai * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f887acSAmir Vadai * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f887acSAmir Vadai * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f887acSAmir Vadai * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f887acSAmir Vadai * SOFTWARE. 
31e8f887acSAmir Vadai */ 32e8f887acSAmir Vadai 33e3a2b7edSAmir Vadai #include <net/flow_dissector.h> 343f7d0eb4SOr Gerlitz #include <net/sch_generic.h> 35e3a2b7edSAmir Vadai #include <net/pkt_cls.h> 36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h> 3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h> 38e8f887acSAmir Vadai #include <linux/mlx5/fs.h> 39e8f887acSAmir Vadai #include <linux/mlx5/device.h> 40e8f887acSAmir Vadai #include <linux/rhashtable.h> 4103a9d11eSOr Gerlitz #include <net/switchdev.h> 4203a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h> 43776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h> 44bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h> 45a54e20b4SHadar Hen Zion #include <net/vxlan.h> 46e8f887acSAmir Vadai #include "en.h" 47e8f887acSAmir Vadai #include "en_tc.h" 4803a9d11eSOr Gerlitz #include "eswitch.h" 49bbd00f7eSHadar Hen Zion #include "vxlan.h" 50e8f887acSAmir Vadai 5165ba8fb7SOr Gerlitz enum { 5265ba8fb7SOr Gerlitz MLX5E_TC_FLOW_ESWITCH = BIT(0), 5365ba8fb7SOr Gerlitz }; 5465ba8fb7SOr Gerlitz 55e8f887acSAmir Vadai struct mlx5e_tc_flow { 56e8f887acSAmir Vadai struct rhash_head node; 57e8f887acSAmir Vadai u64 cookie; 5865ba8fb7SOr Gerlitz u8 flags; 5974491de9SMark Bloch struct mlx5_flow_handle *rule; 60a54e20b4SHadar Hen Zion struct list_head encap; /* flows sharing the same encap */ 61776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr; 62e8f887acSAmir Vadai }; 63e8f887acSAmir Vadai 64a54e20b4SHadar Hen Zion enum { 65a54e20b4SHadar Hen Zion MLX5_HEADER_TYPE_VXLAN = 0x0, 66a54e20b4SHadar Hen Zion MLX5_HEADER_TYPE_NVGRE = 0x1, 67a54e20b4SHadar Hen Zion }; 68a54e20b4SHadar Hen Zion 69acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_ENTRIES 1024 70acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4 71e8f887acSAmir Vadai 7274491de9SMark Bloch static struct mlx5_flow_handle * 7374491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 74c5bb1730SMaor Gottlieb struct mlx5_flow_spec *spec, 75e8f887acSAmir Vadai 
u32 action, u32 flow_tag) 76e8f887acSAmir Vadai { 77aad7e08dSAmir Vadai struct mlx5_core_dev *dev = priv->mdev; 78aad7e08dSAmir Vadai struct mlx5_flow_destination dest = { 0 }; 7966958ed9SHadar Hen Zion struct mlx5_flow_act flow_act = { 8066958ed9SHadar Hen Zion .action = action, 8166958ed9SHadar Hen Zion .flow_tag = flow_tag, 8266958ed9SHadar Hen Zion .encap_id = 0, 8366958ed9SHadar Hen Zion }; 84aad7e08dSAmir Vadai struct mlx5_fc *counter = NULL; 8574491de9SMark Bloch struct mlx5_flow_handle *rule; 86e8f887acSAmir Vadai bool table_created = false; 87e8f887acSAmir Vadai 88aad7e08dSAmir Vadai if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 89aad7e08dSAmir Vadai dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 90aad7e08dSAmir Vadai dest.ft = priv->fs.vlan.ft.t; 9155130287SOr Gerlitz } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 92aad7e08dSAmir Vadai counter = mlx5_fc_create(dev, true); 93aad7e08dSAmir Vadai if (IS_ERR(counter)) 94aad7e08dSAmir Vadai return ERR_CAST(counter); 95aad7e08dSAmir Vadai 96aad7e08dSAmir Vadai dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 97aad7e08dSAmir Vadai dest.counter = counter; 98aad7e08dSAmir Vadai } 99aad7e08dSAmir Vadai 100acff797cSMaor Gottlieb if (IS_ERR_OR_NULL(priv->fs.tc.t)) { 101acff797cSMaor Gottlieb priv->fs.tc.t = 102acff797cSMaor Gottlieb mlx5_create_auto_grouped_flow_table(priv->fs.ns, 103acff797cSMaor Gottlieb MLX5E_TC_PRIO, 104acff797cSMaor Gottlieb MLX5E_TC_TABLE_NUM_ENTRIES, 105acff797cSMaor Gottlieb MLX5E_TC_TABLE_NUM_GROUPS, 106c9f1b073SHadar Hen Zion 0, 0); 107acff797cSMaor Gottlieb if (IS_ERR(priv->fs.tc.t)) { 108e8f887acSAmir Vadai netdev_err(priv->netdev, 109e8f887acSAmir Vadai "Failed to create tc offload table\n"); 110aad7e08dSAmir Vadai rule = ERR_CAST(priv->fs.tc.t); 111aad7e08dSAmir Vadai goto err_create_ft; 112e8f887acSAmir Vadai } 113e8f887acSAmir Vadai 114e8f887acSAmir Vadai table_created = true; 115e8f887acSAmir Vadai } 116e8f887acSAmir Vadai 117c5bb1730SMaor Gottlieb 
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 11866958ed9SHadar Hen Zion rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1); 119e8f887acSAmir Vadai 120aad7e08dSAmir Vadai if (IS_ERR(rule)) 121aad7e08dSAmir Vadai goto err_add_rule; 122aad7e08dSAmir Vadai 123aad7e08dSAmir Vadai return rule; 124aad7e08dSAmir Vadai 125aad7e08dSAmir Vadai err_add_rule: 126aad7e08dSAmir Vadai if (table_created) { 127acff797cSMaor Gottlieb mlx5_destroy_flow_table(priv->fs.tc.t); 128acff797cSMaor Gottlieb priv->fs.tc.t = NULL; 129e8f887acSAmir Vadai } 130aad7e08dSAmir Vadai err_create_ft: 131aad7e08dSAmir Vadai mlx5_fc_destroy(dev, counter); 132e8f887acSAmir Vadai 133e8f887acSAmir Vadai return rule; 134e8f887acSAmir Vadai } 135e8f887acSAmir Vadai 136d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, 137d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow) 138d85cdccbSOr Gerlitz { 139d85cdccbSOr Gerlitz struct mlx5_fc *counter = NULL; 140d85cdccbSOr Gerlitz 141d85cdccbSOr Gerlitz if (!IS_ERR(flow->rule)) { 142d85cdccbSOr Gerlitz counter = mlx5_flow_rule_counter(flow->rule); 143d85cdccbSOr Gerlitz mlx5_del_flow_rules(flow->rule); 144d85cdccbSOr Gerlitz mlx5_fc_destroy(priv->mdev, counter); 145d85cdccbSOr Gerlitz } 146d85cdccbSOr Gerlitz 147d85cdccbSOr Gerlitz if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { 148d85cdccbSOr Gerlitz mlx5_destroy_flow_table(priv->fs.tc.t); 149d85cdccbSOr Gerlitz priv->fs.tc.t = NULL; 150d85cdccbSOr Gerlitz } 151d85cdccbSOr Gerlitz } 152d85cdccbSOr Gerlitz 15374491de9SMark Bloch static struct mlx5_flow_handle * 15474491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 155adb4c123SOr Gerlitz struct mlx5_flow_spec *spec, 156776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr) 157adb4c123SOr Gerlitz { 158adb4c123SOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1598b32580dSOr Gerlitz int err; 1608b32580dSOr Gerlitz 1618b32580dSOr Gerlitz err = mlx5_eswitch_add_vlan_action(esw, attr); 
1628b32580dSOr Gerlitz if (err) 1638b32580dSOr Gerlitz return ERR_PTR(err); 164adb4c123SOr Gerlitz 165776b12b6SOr Gerlitz return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); 166adb4c123SOr Gerlitz } 167adb4c123SOr Gerlitz 1685067b602SRoi Dayan static void mlx5e_detach_encap(struct mlx5e_priv *priv, 169d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow); 170d85cdccbSOr Gerlitz 171d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, 172d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow) 173d85cdccbSOr Gerlitz { 174d85cdccbSOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 175d85cdccbSOr Gerlitz 176d85cdccbSOr Gerlitz mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); 177d85cdccbSOr Gerlitz 178d85cdccbSOr Gerlitz mlx5_eswitch_del_vlan_action(esw, flow->attr); 179d85cdccbSOr Gerlitz 180d85cdccbSOr Gerlitz if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) 181d85cdccbSOr Gerlitz mlx5e_detach_encap(priv, flow); 182d85cdccbSOr Gerlitz } 183d85cdccbSOr Gerlitz 184d85cdccbSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv, 185d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow) 186d85cdccbSOr Gerlitz { 1875067b602SRoi Dayan struct list_head *next = flow->encap.next; 1885067b602SRoi Dayan 1895067b602SRoi Dayan list_del(&flow->encap); 1905067b602SRoi Dayan if (list_empty(next)) { 1915067b602SRoi Dayan struct mlx5_encap_entry *e; 1925067b602SRoi Dayan 1935067b602SRoi Dayan e = list_entry(next, struct mlx5_encap_entry, flows); 1945067b602SRoi Dayan if (e->n) { 1955067b602SRoi Dayan mlx5_encap_dealloc(priv->mdev, e->encap_id); 1965067b602SRoi Dayan neigh_release(e->n); 1975067b602SRoi Dayan } 1985067b602SRoi Dayan hlist_del_rcu(&e->encap_hlist); 1995067b602SRoi Dayan kfree(e); 2005067b602SRoi Dayan } 2015067b602SRoi Dayan } 2025067b602SRoi Dayan 2035e86397aSOr Gerlitz /* we get here also when setting rule to the FW failed, etc. 
It means that the 2045e86397aSOr Gerlitz * flow rule itself might not exist, but some offloading related to the actions 2055e86397aSOr Gerlitz * should be cleaned. 2065e86397aSOr Gerlitz */ 207e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 208961e8979SRoi Dayan struct mlx5e_tc_flow *flow) 209e8f887acSAmir Vadai { 210d85cdccbSOr Gerlitz if (flow->flags & MLX5E_TC_FLOW_ESWITCH) 211d85cdccbSOr Gerlitz mlx5e_tc_del_fdb_flow(priv, flow); 212d85cdccbSOr Gerlitz else 213d85cdccbSOr Gerlitz mlx5e_tc_del_nic_flow(priv, flow); 214e8f887acSAmir Vadai } 215e8f887acSAmir Vadai 216bbd00f7eSHadar Hen Zion static void parse_vxlan_attr(struct mlx5_flow_spec *spec, 217bbd00f7eSHadar Hen Zion struct tc_cls_flower_offload *f) 218bbd00f7eSHadar Hen Zion { 219bbd00f7eSHadar Hen Zion void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 220bbd00f7eSHadar Hen Zion outer_headers); 221bbd00f7eSHadar Hen Zion void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 222bbd00f7eSHadar Hen Zion outer_headers); 223bbd00f7eSHadar Hen Zion void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 224bbd00f7eSHadar Hen Zion misc_parameters); 225bbd00f7eSHadar Hen Zion void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 226bbd00f7eSHadar Hen Zion misc_parameters); 227bbd00f7eSHadar Hen Zion 228bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); 229bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 230bbd00f7eSHadar Hen Zion 231bbd00f7eSHadar Hen Zion if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 232bbd00f7eSHadar Hen Zion struct flow_dissector_key_keyid *key = 233bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 234bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_KEYID, 235bbd00f7eSHadar Hen Zion f->key); 236bbd00f7eSHadar Hen Zion struct flow_dissector_key_keyid *mask = 237bbd00f7eSHadar Hen Zion 
skb_flow_dissector_target(f->dissector, 238bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_KEYID, 239bbd00f7eSHadar Hen Zion f->mask); 240bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 241bbd00f7eSHadar Hen Zion be32_to_cpu(mask->keyid)); 242bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, 243bbd00f7eSHadar Hen Zion be32_to_cpu(key->keyid)); 244bbd00f7eSHadar Hen Zion } 245bbd00f7eSHadar Hen Zion } 246bbd00f7eSHadar Hen Zion 247bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv, 248bbd00f7eSHadar Hen Zion struct mlx5_flow_spec *spec, 249bbd00f7eSHadar Hen Zion struct tc_cls_flower_offload *f) 250bbd00f7eSHadar Hen Zion { 251bbd00f7eSHadar Hen Zion void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 252bbd00f7eSHadar Hen Zion outer_headers); 253bbd00f7eSHadar Hen Zion void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 254bbd00f7eSHadar Hen Zion outer_headers); 255bbd00f7eSHadar Hen Zion 2562e72eb43SOr Gerlitz struct flow_dissector_key_control *enc_control = 2572e72eb43SOr Gerlitz skb_flow_dissector_target(f->dissector, 2582e72eb43SOr Gerlitz FLOW_DISSECTOR_KEY_ENC_CONTROL, 2592e72eb43SOr Gerlitz f->key); 2602e72eb43SOr Gerlitz 261bbd00f7eSHadar Hen Zion if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { 262bbd00f7eSHadar Hen Zion struct flow_dissector_key_ports *key = 263bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 264bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_PORTS, 265bbd00f7eSHadar Hen Zion f->key); 266bbd00f7eSHadar Hen Zion struct flow_dissector_key_ports *mask = 267bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 268bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_PORTS, 269bbd00f7eSHadar Hen Zion f->mask); 270bbd00f7eSHadar Hen Zion 271bbd00f7eSHadar Hen Zion /* Full udp dst port must be given */ 272bbd00f7eSHadar Hen Zion if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 2732fcd82e9SOr Gerlitz 
goto vxlan_match_offload_err; 274bbd00f7eSHadar Hen Zion 275bbd00f7eSHadar Hen Zion if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && 276bbd00f7eSHadar Hen Zion MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 277bbd00f7eSHadar Hen Zion parse_vxlan_attr(spec, f); 2782fcd82e9SOr Gerlitz else { 2792fcd82e9SOr Gerlitz netdev_warn(priv->netdev, 2802fcd82e9SOr Gerlitz "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); 281bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 2822fcd82e9SOr Gerlitz } 283bbd00f7eSHadar Hen Zion 284bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 285bbd00f7eSHadar Hen Zion udp_dport, ntohs(mask->dst)); 286bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 287bbd00f7eSHadar Hen Zion udp_dport, ntohs(key->dst)); 288bbd00f7eSHadar Hen Zion 289cd377663SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, 290cd377663SOr Gerlitz udp_sport, ntohs(mask->src)); 291cd377663SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, 292cd377663SOr Gerlitz udp_sport, ntohs(key->src)); 293bbd00f7eSHadar Hen Zion } else { /* udp dst port must be given */ 2942fcd82e9SOr Gerlitz vxlan_match_offload_err: 2952fcd82e9SOr Gerlitz netdev_warn(priv->netdev, 2962fcd82e9SOr Gerlitz "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); 297bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 298bbd00f7eSHadar Hen Zion } 299bbd00f7eSHadar Hen Zion 3002e72eb43SOr Gerlitz if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 301bbd00f7eSHadar Hen Zion struct flow_dissector_key_ipv4_addrs *key = 302bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 303bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 304bbd00f7eSHadar Hen Zion f->key); 305bbd00f7eSHadar Hen Zion struct flow_dissector_key_ipv4_addrs *mask = 306bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 307bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 308bbd00f7eSHadar Hen Zion f->mask); 309bbd00f7eSHadar 
Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 310bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 311bbd00f7eSHadar Hen Zion ntohl(mask->src)); 312bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 313bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 314bbd00f7eSHadar Hen Zion ntohl(key->src)); 315bbd00f7eSHadar Hen Zion 316bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 317bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 318bbd00f7eSHadar Hen Zion ntohl(mask->dst)); 319bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 320bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 321bbd00f7eSHadar Hen Zion ntohl(key->dst)); 322bbd00f7eSHadar Hen Zion 323bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 324bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); 32519f44401SOr Gerlitz } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 32619f44401SOr Gerlitz struct flow_dissector_key_ipv6_addrs *key = 32719f44401SOr Gerlitz skb_flow_dissector_target(f->dissector, 32819f44401SOr Gerlitz FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, 32919f44401SOr Gerlitz f->key); 33019f44401SOr Gerlitz struct flow_dissector_key_ipv6_addrs *mask = 33119f44401SOr Gerlitz skb_flow_dissector_target(f->dissector, 33219f44401SOr Gerlitz FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, 33319f44401SOr Gerlitz f->mask); 33419f44401SOr Gerlitz 33519f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 33619f44401SOr Gerlitz src_ipv4_src_ipv6.ipv6_layout.ipv6), 33719f44401SOr Gerlitz &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 33819f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 33919f44401SOr Gerlitz src_ipv4_src_ipv6.ipv6_layout.ipv6), 34019f44401SOr Gerlitz &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 34119f44401SOr Gerlitz 34219f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, 
headers_c, 34319f44401SOr Gerlitz dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 34419f44401SOr Gerlitz &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 34519f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 34619f44401SOr Gerlitz dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 34719f44401SOr Gerlitz &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 34819f44401SOr Gerlitz 34919f44401SOr Gerlitz MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 35019f44401SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); 3512e72eb43SOr Gerlitz } 352bbd00f7eSHadar Hen Zion 353bbd00f7eSHadar Hen Zion /* Enforce DMAC when offloading incoming tunneled flows. 354bbd00f7eSHadar Hen Zion * Flow counters require a match on the DMAC. 355bbd00f7eSHadar Hen Zion */ 356bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 357bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 358bbd00f7eSHadar Hen Zion ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 359bbd00f7eSHadar Hen Zion dmac_47_16), priv->netdev->dev_addr); 360bbd00f7eSHadar Hen Zion 361bbd00f7eSHadar Hen Zion /* let software handle IP fragments */ 362bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 363bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 364bbd00f7eSHadar Hen Zion 365bbd00f7eSHadar Hen Zion return 0; 366bbd00f7eSHadar Hen Zion } 367bbd00f7eSHadar Hen Zion 368de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv, 369de0af0bfSRoi Dayan struct mlx5_flow_spec *spec, 370de0af0bfSRoi Dayan struct tc_cls_flower_offload *f, 371de0af0bfSRoi Dayan u8 *min_inline) 372e3a2b7edSAmir Vadai { 373c5bb1730SMaor Gottlieb void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 374c5bb1730SMaor Gottlieb outer_headers); 375c5bb1730SMaor Gottlieb void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 376c5bb1730SMaor 
Gottlieb outer_headers); 377e3a2b7edSAmir Vadai u16 addr_type = 0; 378e3a2b7edSAmir Vadai u8 ip_proto = 0; 379e3a2b7edSAmir Vadai 380de0af0bfSRoi Dayan *min_inline = MLX5_INLINE_MODE_L2; 381de0af0bfSRoi Dayan 382e3a2b7edSAmir Vadai if (f->dissector->used_keys & 383e3a2b7edSAmir Vadai ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 384e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_BASIC) | 385e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 386095b6cfdSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_VLAN) | 387e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 388e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 389bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_PORTS) | 390bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | 391bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 392bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | 393bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | 394bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) { 395e3a2b7edSAmir Vadai netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", 396e3a2b7edSAmir Vadai f->dissector->used_keys); 397e3a2b7edSAmir Vadai return -EOPNOTSUPP; 398e3a2b7edSAmir Vadai } 399e3a2b7edSAmir Vadai 400bbd00f7eSHadar Hen Zion if ((dissector_uses_key(f->dissector, 401bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || 402bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) || 403bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) && 404bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 405bbd00f7eSHadar Hen Zion struct flow_dissector_key_control *key = 406bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 407bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_CONTROL, 408bbd00f7eSHadar Hen Zion f->key); 409bbd00f7eSHadar Hen Zion switch (key->addr_type) { 410bbd00f7eSHadar Hen Zion case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 41119f44401SOr Gerlitz 
case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 412bbd00f7eSHadar Hen Zion if (parse_tunnel_attr(priv, spec, f)) 413bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 414bbd00f7eSHadar Hen Zion break; 415bbd00f7eSHadar Hen Zion default: 416bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 417bbd00f7eSHadar Hen Zion } 418bbd00f7eSHadar Hen Zion 419bbd00f7eSHadar Hen Zion /* In decap flow, header pointers should point to the inner 420bbd00f7eSHadar Hen Zion * headers, outer header were already set by parse_tunnel_attr 421bbd00f7eSHadar Hen Zion */ 422bbd00f7eSHadar Hen Zion headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 423bbd00f7eSHadar Hen Zion inner_headers); 424bbd00f7eSHadar Hen Zion headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 425bbd00f7eSHadar Hen Zion inner_headers); 426bbd00f7eSHadar Hen Zion } 427bbd00f7eSHadar Hen Zion 428e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { 429e3a2b7edSAmir Vadai struct flow_dissector_key_control *key = 430e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 4311dbd0d37SHadar Hen Zion FLOW_DISSECTOR_KEY_CONTROL, 432e3a2b7edSAmir Vadai f->key); 4333f7d0eb4SOr Gerlitz 4343f7d0eb4SOr Gerlitz struct flow_dissector_key_control *mask = 4353f7d0eb4SOr Gerlitz skb_flow_dissector_target(f->dissector, 4363f7d0eb4SOr Gerlitz FLOW_DISSECTOR_KEY_CONTROL, 4373f7d0eb4SOr Gerlitz f->mask); 438e3a2b7edSAmir Vadai addr_type = key->addr_type; 4393f7d0eb4SOr Gerlitz 4403f7d0eb4SOr Gerlitz if (mask->flags & FLOW_DIS_IS_FRAGMENT) { 4413f7d0eb4SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 4423f7d0eb4SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 4433f7d0eb4SOr Gerlitz key->flags & FLOW_DIS_IS_FRAGMENT); 4440827444dSOr Gerlitz 4450827444dSOr Gerlitz /* the HW doesn't need L3 inline to match on frag=no */ 4460827444dSOr Gerlitz if (key->flags & FLOW_DIS_IS_FRAGMENT) 4470827444dSOr Gerlitz *min_inline = MLX5_INLINE_MODE_IP; 4483f7d0eb4SOr Gerlitz } 449e3a2b7edSAmir 
Vadai } 450e3a2b7edSAmir Vadai 451e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 452e3a2b7edSAmir Vadai struct flow_dissector_key_basic *key = 453e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 454e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_BASIC, 455e3a2b7edSAmir Vadai f->key); 456e3a2b7edSAmir Vadai struct flow_dissector_key_basic *mask = 457e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 458e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_BASIC, 459e3a2b7edSAmir Vadai f->mask); 460e3a2b7edSAmir Vadai ip_proto = key->ip_proto; 461e3a2b7edSAmir Vadai 462e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 463e3a2b7edSAmir Vadai ntohs(mask->n_proto)); 464e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 465e3a2b7edSAmir Vadai ntohs(key->n_proto)); 466e3a2b7edSAmir Vadai 467e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 468e3a2b7edSAmir Vadai mask->ip_proto); 469e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 470e3a2b7edSAmir Vadai key->ip_proto); 471de0af0bfSRoi Dayan 472de0af0bfSRoi Dayan if (mask->ip_proto) 473de0af0bfSRoi Dayan *min_inline = MLX5_INLINE_MODE_IP; 474e3a2b7edSAmir Vadai } 475e3a2b7edSAmir Vadai 476e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 477e3a2b7edSAmir Vadai struct flow_dissector_key_eth_addrs *key = 478e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 479e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_ETH_ADDRS, 480e3a2b7edSAmir Vadai f->key); 481e3a2b7edSAmir Vadai struct flow_dissector_key_eth_addrs *mask = 482e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 483e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_ETH_ADDRS, 484e3a2b7edSAmir Vadai f->mask); 485e3a2b7edSAmir Vadai 486e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 487e3a2b7edSAmir Vadai dmac_47_16), 488e3a2b7edSAmir Vadai mask->dst); 489e3a2b7edSAmir 
Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 490e3a2b7edSAmir Vadai dmac_47_16), 491e3a2b7edSAmir Vadai key->dst); 492e3a2b7edSAmir Vadai 493e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 494e3a2b7edSAmir Vadai smac_47_16), 495e3a2b7edSAmir Vadai mask->src); 496e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 497e3a2b7edSAmir Vadai smac_47_16), 498e3a2b7edSAmir Vadai key->src); 499e3a2b7edSAmir Vadai } 500e3a2b7edSAmir Vadai 501095b6cfdSOr Gerlitz if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 502095b6cfdSOr Gerlitz struct flow_dissector_key_vlan *key = 503095b6cfdSOr Gerlitz skb_flow_dissector_target(f->dissector, 504095b6cfdSOr Gerlitz FLOW_DISSECTOR_KEY_VLAN, 505095b6cfdSOr Gerlitz f->key); 506095b6cfdSOr Gerlitz struct flow_dissector_key_vlan *mask = 507095b6cfdSOr Gerlitz skb_flow_dissector_target(f->dissector, 508095b6cfdSOr Gerlitz FLOW_DISSECTOR_KEY_VLAN, 509095b6cfdSOr Gerlitz f->mask); 510358d79a4SOr Gerlitz if (mask->vlan_id || mask->vlan_priority) { 51110543365SMohamad Haj Yahia MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 51210543365SMohamad Haj Yahia MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); 513095b6cfdSOr Gerlitz 514095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 515095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 516358d79a4SOr Gerlitz 517358d79a4SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); 518358d79a4SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); 519095b6cfdSOr Gerlitz } 520095b6cfdSOr Gerlitz } 521095b6cfdSOr Gerlitz 522e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 523e3a2b7edSAmir Vadai struct flow_dissector_key_ipv4_addrs *key = 524e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 525e3a2b7edSAmir Vadai 
FLOW_DISSECTOR_KEY_IPV4_ADDRS, 526e3a2b7edSAmir Vadai f->key); 527e3a2b7edSAmir Vadai struct flow_dissector_key_ipv4_addrs *mask = 528e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 529e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV4_ADDRS, 530e3a2b7edSAmir Vadai f->mask); 531e3a2b7edSAmir Vadai 532e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 533e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 534e3a2b7edSAmir Vadai &mask->src, sizeof(mask->src)); 535e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 536e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 537e3a2b7edSAmir Vadai &key->src, sizeof(key->src)); 538e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 539e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 540e3a2b7edSAmir Vadai &mask->dst, sizeof(mask->dst)); 541e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 542e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 543e3a2b7edSAmir Vadai &key->dst, sizeof(key->dst)); 544de0af0bfSRoi Dayan 545de0af0bfSRoi Dayan if (mask->src || mask->dst) 546de0af0bfSRoi Dayan *min_inline = MLX5_INLINE_MODE_IP; 547e3a2b7edSAmir Vadai } 548e3a2b7edSAmir Vadai 549e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 550e3a2b7edSAmir Vadai struct flow_dissector_key_ipv6_addrs *key = 551e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 552e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV6_ADDRS, 553e3a2b7edSAmir Vadai f->key); 554e3a2b7edSAmir Vadai struct flow_dissector_key_ipv6_addrs *mask = 555e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 556e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV6_ADDRS, 557e3a2b7edSAmir Vadai f->mask); 558e3a2b7edSAmir Vadai 559e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 560e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 561e3a2b7edSAmir Vadai &mask->src, sizeof(mask->src)); 562e3a2b7edSAmir Vadai 
	/* IPv6 address match: copy the 128-bit source key into the match
	 * value, then the 128-bit destination mask/key into criteria/value.
	 */
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			    src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &key->src, sizeof(key->src));

	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
			    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &mask->dst, sizeof(mask->dst));
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &key->dst, sizeof(key->dst));

	/* Matching on any L3 address bit requires the sender (VF) to
	 * inline at least up to the IP headers.
	 */
	if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
	    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
		*min_inline = MLX5_INLINE_MODE_IP;
	}

	/* L4 (TCP/UDP) port match; only valid once ip_proto is known. */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			/* MLX5_SET takes host order, dissector keys are
			 * network order, hence the ntohs().
			 */
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		/* Port match needs inlining up to the L4 headers. */
		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

/* Parse a flower classifier into an mlx5 flow spec and, for eswitch
 * (FDB) flows on a VF rep, reject the flow if the e-switch inline mode
 * configured on the device is smaller than what the match demands.
 */
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	/* NOTE(review): ppriv is only a struct mlx5_eswitch_rep * for
	 * eswitch (rep) netdevs; rep is dereferenced only under the
	 * MLX5E_TC_FLOW_ESWITCH flag below, which guards that.
	 */
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	/* The uplink vport has no inline constraint of its own. */
	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

/* Translate TC actions of a NIC (non-eswitch) flow into an mlx5 flow
 * context action mask and flow tag.  Exactly one action is supported
 * per rule: either gact drop or skbedit mark (fwd + tag).
 *
 * Returns 0 on success, -EINVAL on unsupported/multiple actions.
 */
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			/* Attach a counter to the drop only if the NIC RX
			 * flow tables support flow counters.
			 */
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			/* The HW flow tag carries only 16 usable bits. */
			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

/* Compare two tunnel keys; 0 means equal (memcmp semantics). */
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

/* Hash a tunnel key for the encap table lookup. */
static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

/* Resolve the IPv4 route for a tunnel destination and return the egress
 * netdev (or the e-switch uplink if the route leaves this HW switch),
 * the neighbour entry (referenced; caller must neigh_release()) and the
 * path TTL.  Returns 0 or a negative errno.
 */
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

/* IPv6 twin of mlx5e_route_lookup_ipv4(); same output contract.
 * Compiled out (returns -EOPNOTSUPP) without CONFIG_INET+CONFIG_IPV6.
 */
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	/* ip6_route_output() never returns NULL; errors come via dst->error */
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

/* Build an Ethernet+IPv4+UDP+VXLAN encapsulation header into buf and
 * return its size.  buf must hold at least VXLAN_HLEN +
 * sizeof(struct iphdr) + ETH_HLEN bytes.
 * NOTE(review): ip->tot_len/check and udp->source/len are left zero —
 * presumably completed by the HW on encap; confirm against the PRM.
 */
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

/* IPv6 twin of gen_vxlan_header_ipv4(); builds Ethernet+IPv6+UDP+VXLAN
 * into buf and returns the header size.
 */
static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}
855ce99f6b9SOr Gerlitz 856a54e20b4SHadar Hen Zion static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, 857a54e20b4SHadar Hen Zion struct net_device *mirred_dev, 858a54e20b4SHadar Hen Zion struct mlx5_encap_entry *e, 859a54e20b4SHadar Hen Zion struct net_device **out_dev) 860a54e20b4SHadar Hen Zion { 861a54e20b4SHadar Hen Zion int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 86276f7444dSOr Gerlitz struct ip_tunnel_key *tun_key = &e->tun_info.key; 8639a941117SOr Gerlitz int encap_size, ttl, err; 864a42485ebSOr Gerlitz struct neighbour *n = NULL; 865a54e20b4SHadar Hen Zion struct flowi4 fl4 = {}; 866a54e20b4SHadar Hen Zion char *encap_header; 867a54e20b4SHadar Hen Zion 868a54e20b4SHadar Hen Zion encap_header = kzalloc(max_encap_size, GFP_KERNEL); 869a54e20b4SHadar Hen Zion if (!encap_header) 870a54e20b4SHadar Hen Zion return -ENOMEM; 871a54e20b4SHadar Hen Zion 872a54e20b4SHadar Hen Zion switch (e->tunnel_type) { 873a54e20b4SHadar Hen Zion case MLX5_HEADER_TYPE_VXLAN: 874a54e20b4SHadar Hen Zion fl4.flowi4_proto = IPPROTO_UDP; 87576f7444dSOr Gerlitz fl4.fl4_dport = tun_key->tp_dst; 876a54e20b4SHadar Hen Zion break; 877a54e20b4SHadar Hen Zion default: 878a54e20b4SHadar Hen Zion err = -EOPNOTSUPP; 879a54e20b4SHadar Hen Zion goto out; 880a54e20b4SHadar Hen Zion } 8819a941117SOr Gerlitz fl4.flowi4_tos = tun_key->tos; 88276f7444dSOr Gerlitz fl4.daddr = tun_key->u.ipv4.dst; 8839a941117SOr Gerlitz fl4.saddr = tun_key->u.ipv4.src; 884a54e20b4SHadar Hen Zion 885a54e20b4SHadar Hen Zion err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, 8869a941117SOr Gerlitz &fl4, &n, &ttl); 887a54e20b4SHadar Hen Zion if (err) 888a54e20b4SHadar Hen Zion goto out; 889a54e20b4SHadar Hen Zion 890a54e20b4SHadar Hen Zion if (!(n->nud_state & NUD_VALID)) { 891a42485ebSOr Gerlitz pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); 892a42485ebSOr Gerlitz err = -EOPNOTSUPP; 893a54e20b4SHadar Hen Zion goto out; 894a54e20b4SHadar Hen Zion 
} 895a54e20b4SHadar Hen Zion 89675c33da8SOr Gerlitz e->n = n; 89775c33da8SOr Gerlitz e->out_dev = *out_dev; 89875c33da8SOr Gerlitz 899a54e20b4SHadar Hen Zion neigh_ha_snapshot(e->h_dest, n, *out_dev); 900a54e20b4SHadar Hen Zion 901a54e20b4SHadar Hen Zion switch (e->tunnel_type) { 902a54e20b4SHadar Hen Zion case MLX5_HEADER_TYPE_VXLAN: 903a54e20b4SHadar Hen Zion encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, 904a54e20b4SHadar Hen Zion e->h_dest, ttl, 9059a941117SOr Gerlitz fl4.daddr, 9069a941117SOr Gerlitz fl4.saddr, tun_key->tp_dst, 90776f7444dSOr Gerlitz tunnel_id_to_key32(tun_key->tun_id)); 908a54e20b4SHadar Hen Zion break; 909a54e20b4SHadar Hen Zion default: 910a54e20b4SHadar Hen Zion err = -EOPNOTSUPP; 911a54e20b4SHadar Hen Zion goto out; 912a54e20b4SHadar Hen Zion } 913a54e20b4SHadar Hen Zion 914a54e20b4SHadar Hen Zion err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 915a54e20b4SHadar Hen Zion encap_size, encap_header, &e->encap_id); 916a54e20b4SHadar Hen Zion out: 917a42485ebSOr Gerlitz if (err && n) 918a42485ebSOr Gerlitz neigh_release(n); 919a54e20b4SHadar Hen Zion kfree(encap_header); 920a54e20b4SHadar Hen Zion return err; 921a54e20b4SHadar Hen Zion } 922a54e20b4SHadar Hen Zion 923ce99f6b9SOr Gerlitz static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, 924ce99f6b9SOr Gerlitz struct net_device *mirred_dev, 925ce99f6b9SOr Gerlitz struct mlx5_encap_entry *e, 926ce99f6b9SOr Gerlitz struct net_device **out_dev) 927ce99f6b9SOr Gerlitz 928ce99f6b9SOr Gerlitz { 929ce99f6b9SOr Gerlitz int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 930ce99f6b9SOr Gerlitz struct ip_tunnel_key *tun_key = &e->tun_info.key; 931ce99f6b9SOr Gerlitz int encap_size, err, ttl = 0; 932ce99f6b9SOr Gerlitz struct neighbour *n = NULL; 933ce99f6b9SOr Gerlitz struct flowi6 fl6 = {}; 934ce99f6b9SOr Gerlitz char *encap_header; 935ce99f6b9SOr Gerlitz 936ce99f6b9SOr Gerlitz encap_header = kzalloc(max_encap_size, GFP_KERNEL); 937ce99f6b9SOr Gerlitz if 
(!encap_header) 938ce99f6b9SOr Gerlitz return -ENOMEM; 939ce99f6b9SOr Gerlitz 940ce99f6b9SOr Gerlitz switch (e->tunnel_type) { 941ce99f6b9SOr Gerlitz case MLX5_HEADER_TYPE_VXLAN: 942ce99f6b9SOr Gerlitz fl6.flowi6_proto = IPPROTO_UDP; 943ce99f6b9SOr Gerlitz fl6.fl6_dport = tun_key->tp_dst; 944ce99f6b9SOr Gerlitz break; 945ce99f6b9SOr Gerlitz default: 946ce99f6b9SOr Gerlitz err = -EOPNOTSUPP; 947ce99f6b9SOr Gerlitz goto out; 948ce99f6b9SOr Gerlitz } 949ce99f6b9SOr Gerlitz 950ce99f6b9SOr Gerlitz fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); 951ce99f6b9SOr Gerlitz fl6.daddr = tun_key->u.ipv6.dst; 952ce99f6b9SOr Gerlitz fl6.saddr = tun_key->u.ipv6.src; 953ce99f6b9SOr Gerlitz 954ce99f6b9SOr Gerlitz err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, 955ce99f6b9SOr Gerlitz &fl6, &n, &ttl); 956ce99f6b9SOr Gerlitz if (err) 957ce99f6b9SOr Gerlitz goto out; 958ce99f6b9SOr Gerlitz 959ce99f6b9SOr Gerlitz if (!(n->nud_state & NUD_VALID)) { 960ce99f6b9SOr Gerlitz pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); 961ce99f6b9SOr Gerlitz err = -EOPNOTSUPP; 962ce99f6b9SOr Gerlitz goto out; 963ce99f6b9SOr Gerlitz } 964ce99f6b9SOr Gerlitz 965ce99f6b9SOr Gerlitz e->n = n; 966ce99f6b9SOr Gerlitz e->out_dev = *out_dev; 967ce99f6b9SOr Gerlitz 968ce99f6b9SOr Gerlitz neigh_ha_snapshot(e->h_dest, n, *out_dev); 969ce99f6b9SOr Gerlitz 970ce99f6b9SOr Gerlitz switch (e->tunnel_type) { 971ce99f6b9SOr Gerlitz case MLX5_HEADER_TYPE_VXLAN: 972ce99f6b9SOr Gerlitz encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, 973ce99f6b9SOr Gerlitz e->h_dest, ttl, 974ce99f6b9SOr Gerlitz &fl6.daddr, 975ce99f6b9SOr Gerlitz &fl6.saddr, tun_key->tp_dst, 976ce99f6b9SOr Gerlitz tunnel_id_to_key32(tun_key->tun_id)); 977ce99f6b9SOr Gerlitz break; 978ce99f6b9SOr Gerlitz default: 979ce99f6b9SOr Gerlitz err = -EOPNOTSUPP; 980ce99f6b9SOr Gerlitz goto out; 981ce99f6b9SOr Gerlitz } 982ce99f6b9SOr Gerlitz 983ce99f6b9SOr Gerlitz err = 
mlx5_encap_alloc(priv->mdev, e->tunnel_type, 984ce99f6b9SOr Gerlitz encap_size, encap_header, &e->encap_id); 985ce99f6b9SOr Gerlitz out: 986ce99f6b9SOr Gerlitz if (err && n) 987ce99f6b9SOr Gerlitz neigh_release(n); 988ce99f6b9SOr Gerlitz kfree(encap_header); 989ce99f6b9SOr Gerlitz return err; 990ce99f6b9SOr Gerlitz } 991ce99f6b9SOr Gerlitz 992a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv, 993a54e20b4SHadar Hen Zion struct ip_tunnel_info *tun_info, 994a54e20b4SHadar Hen Zion struct net_device *mirred_dev, 995776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr) 99603a9d11eSOr Gerlitz { 997a54e20b4SHadar Hen Zion struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 998a54e20b4SHadar Hen Zion unsigned short family = ip_tunnel_info_af(tun_info); 999a54e20b4SHadar Hen Zion struct ip_tunnel_key *key = &tun_info->key; 1000a54e20b4SHadar Hen Zion struct mlx5_encap_entry *e; 1001a54e20b4SHadar Hen Zion struct net_device *out_dev; 1002ce99f6b9SOr Gerlitz int tunnel_type, err = -EOPNOTSUPP; 1003a54e20b4SHadar Hen Zion uintptr_t hash_key; 1004a54e20b4SHadar Hen Zion bool found = false; 1005a54e20b4SHadar Hen Zion 10062fcd82e9SOr Gerlitz /* udp dst port must be set */ 1007a54e20b4SHadar Hen Zion if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) 10082fcd82e9SOr Gerlitz goto vxlan_encap_offload_err; 1009a54e20b4SHadar Hen Zion 1010cd377663SOr Gerlitz /* setting udp src port isn't supported */ 10112fcd82e9SOr Gerlitz if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { 10122fcd82e9SOr Gerlitz vxlan_encap_offload_err: 10132fcd82e9SOr Gerlitz netdev_warn(priv->netdev, 10142fcd82e9SOr Gerlitz "must set udp dst port and not set udp src port\n"); 1015cd377663SOr Gerlitz return -EOPNOTSUPP; 10162fcd82e9SOr Gerlitz } 1017cd377663SOr Gerlitz 1018a54e20b4SHadar Hen Zion if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && 1019a54e20b4SHadar Hen Zion MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { 1020a54e20b4SHadar Hen Zion tunnel_type = 
MLX5_HEADER_TYPE_VXLAN; 1021a54e20b4SHadar Hen Zion } else { 10222fcd82e9SOr Gerlitz netdev_warn(priv->netdev, 10232fcd82e9SOr Gerlitz "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); 1024a54e20b4SHadar Hen Zion return -EOPNOTSUPP; 1025a54e20b4SHadar Hen Zion } 1026a54e20b4SHadar Hen Zion 102776f7444dSOr Gerlitz hash_key = hash_encap_info(key); 1028a54e20b4SHadar Hen Zion 1029a54e20b4SHadar Hen Zion hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, 1030a54e20b4SHadar Hen Zion encap_hlist, hash_key) { 103176f7444dSOr Gerlitz if (!cmp_encap_info(&e->tun_info.key, key)) { 1032a54e20b4SHadar Hen Zion found = true; 1033a54e20b4SHadar Hen Zion break; 1034a54e20b4SHadar Hen Zion } 1035a54e20b4SHadar Hen Zion } 1036a54e20b4SHadar Hen Zion 1037a54e20b4SHadar Hen Zion if (found) { 1038a54e20b4SHadar Hen Zion attr->encap = e; 1039a54e20b4SHadar Hen Zion return 0; 1040a54e20b4SHadar Hen Zion } 1041a54e20b4SHadar Hen Zion 1042a54e20b4SHadar Hen Zion e = kzalloc(sizeof(*e), GFP_KERNEL); 1043a54e20b4SHadar Hen Zion if (!e) 1044a54e20b4SHadar Hen Zion return -ENOMEM; 1045a54e20b4SHadar Hen Zion 104676f7444dSOr Gerlitz e->tun_info = *tun_info; 1047a54e20b4SHadar Hen Zion e->tunnel_type = tunnel_type; 1048a54e20b4SHadar Hen Zion INIT_LIST_HEAD(&e->flows); 1049a54e20b4SHadar Hen Zion 1050ce99f6b9SOr Gerlitz if (family == AF_INET) 1051a54e20b4SHadar Hen Zion err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); 1052ce99f6b9SOr Gerlitz else if (family == AF_INET6) 1053ce99f6b9SOr Gerlitz err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); 1054ce99f6b9SOr Gerlitz 1055a54e20b4SHadar Hen Zion if (err) 1056a54e20b4SHadar Hen Zion goto out_err; 1057a54e20b4SHadar Hen Zion 1058a54e20b4SHadar Hen Zion attr->encap = e; 1059a54e20b4SHadar Hen Zion hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); 1060a54e20b4SHadar Hen Zion 1061a54e20b4SHadar Hen Zion return err; 1062a54e20b4SHadar Hen Zion 1063a54e20b4SHadar Hen Zion 
out_err: 1064a54e20b4SHadar Hen Zion kfree(e); 1065a54e20b4SHadar Hen Zion return err; 1066a54e20b4SHadar Hen Zion } 1067a54e20b4SHadar Hen Zion 1068a54e20b4SHadar Hen Zion static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 1069a54e20b4SHadar Hen Zion struct mlx5e_tc_flow *flow) 1070a54e20b4SHadar Hen Zion { 1071a54e20b4SHadar Hen Zion struct mlx5_esw_flow_attr *attr = flow->attr; 1072a54e20b4SHadar Hen Zion struct ip_tunnel_info *info = NULL; 107303a9d11eSOr Gerlitz const struct tc_action *a; 107422dc13c8SWANG Cong LIST_HEAD(actions); 1075a54e20b4SHadar Hen Zion bool encap = false; 1076a54e20b4SHadar Hen Zion int err; 107703a9d11eSOr Gerlitz 107803a9d11eSOr Gerlitz if (tc_no_actions(exts)) 107903a9d11eSOr Gerlitz return -EINVAL; 108003a9d11eSOr Gerlitz 1081776b12b6SOr Gerlitz memset(attr, 0, sizeof(*attr)); 1082776b12b6SOr Gerlitz attr->in_rep = priv->ppriv; 108303a9d11eSOr Gerlitz 108422dc13c8SWANG Cong tcf_exts_to_list(exts, &actions); 108522dc13c8SWANG Cong list_for_each_entry(a, &actions, list) { 108603a9d11eSOr Gerlitz if (is_tcf_gact_shot(a)) { 10878b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | 108803a9d11eSOr Gerlitz MLX5_FLOW_CONTEXT_ACTION_COUNT; 108903a9d11eSOr Gerlitz continue; 109003a9d11eSOr Gerlitz } 109103a9d11eSOr Gerlitz 10925724b8b5SShmulik Ladkani if (is_tcf_mirred_egress_redirect(a)) { 109303a9d11eSOr Gerlitz int ifindex = tcf_mirred_ifindex(a); 109403a9d11eSOr Gerlitz struct net_device *out_dev; 109503a9d11eSOr Gerlitz struct mlx5e_priv *out_priv; 109603a9d11eSOr Gerlitz 109703a9d11eSOr Gerlitz out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); 109803a9d11eSOr Gerlitz 1099a54e20b4SHadar Hen Zion if (switchdev_port_same_parent_id(priv->netdev, 1100a54e20b4SHadar Hen Zion out_dev)) { 1101e37a79e5SMark Bloch attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1102e37a79e5SMark Bloch MLX5_FLOW_CONTEXT_ACTION_COUNT; 110303a9d11eSOr Gerlitz out_priv = netdev_priv(out_dev); 1104776b12b6SOr 
Gerlitz attr->out_rep = out_priv->ppriv; 1105a54e20b4SHadar Hen Zion } else if (encap) { 1106a54e20b4SHadar Hen Zion err = mlx5e_attach_encap(priv, info, 1107a54e20b4SHadar Hen Zion out_dev, attr); 1108a54e20b4SHadar Hen Zion if (err) 1109a54e20b4SHadar Hen Zion return err; 1110a54e20b4SHadar Hen Zion list_add(&flow->encap, &attr->encap->flows); 1111a54e20b4SHadar Hen Zion attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | 1112a54e20b4SHadar Hen Zion MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1113a54e20b4SHadar Hen Zion MLX5_FLOW_CONTEXT_ACTION_COUNT; 1114a54e20b4SHadar Hen Zion out_priv = netdev_priv(attr->encap->out_dev); 1115a54e20b4SHadar Hen Zion attr->out_rep = out_priv->ppriv; 1116a54e20b4SHadar Hen Zion } else { 1117a54e20b4SHadar Hen Zion pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", 1118a54e20b4SHadar Hen Zion priv->netdev->name, out_dev->name); 1119a54e20b4SHadar Hen Zion return -EINVAL; 1120a54e20b4SHadar Hen Zion } 1121a54e20b4SHadar Hen Zion continue; 1122a54e20b4SHadar Hen Zion } 1123a54e20b4SHadar Hen Zion 1124a54e20b4SHadar Hen Zion if (is_tcf_tunnel_set(a)) { 1125a54e20b4SHadar Hen Zion info = tcf_tunnel_info(a); 1126a54e20b4SHadar Hen Zion if (info) 1127a54e20b4SHadar Hen Zion encap = true; 1128a54e20b4SHadar Hen Zion else 1129a54e20b4SHadar Hen Zion return -EOPNOTSUPP; 113003a9d11eSOr Gerlitz continue; 113103a9d11eSOr Gerlitz } 113203a9d11eSOr Gerlitz 11338b32580dSOr Gerlitz if (is_tcf_vlan(a)) { 113409c91ddfSOr Gerlitz if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { 11358b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 113609c91ddfSOr Gerlitz } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { 11378b32580dSOr Gerlitz if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) 11388b32580dSOr Gerlitz return -EOPNOTSUPP; 11398b32580dSOr Gerlitz 11408b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 11418b32580dSOr Gerlitz attr->vlan = tcf_vlan_push_vid(a); 114209c91ddfSOr Gerlitz } else { /* 
action is TCA_VLAN_ACT_MODIFY */ 114309c91ddfSOr Gerlitz return -EOPNOTSUPP; 11448b32580dSOr Gerlitz } 11458b32580dSOr Gerlitz continue; 11468b32580dSOr Gerlitz } 11478b32580dSOr Gerlitz 1148bbd00f7eSHadar Hen Zion if (is_tcf_tunnel_release(a)) { 1149bbd00f7eSHadar Hen Zion attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 1150bbd00f7eSHadar Hen Zion continue; 1151bbd00f7eSHadar Hen Zion } 1152bbd00f7eSHadar Hen Zion 115303a9d11eSOr Gerlitz return -EINVAL; 115403a9d11eSOr Gerlitz } 115503a9d11eSOr Gerlitz return 0; 115603a9d11eSOr Gerlitz } 115703a9d11eSOr Gerlitz 1158e3a2b7edSAmir Vadai int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, 1159e3a2b7edSAmir Vadai struct tc_cls_flower_offload *f) 1160e3a2b7edSAmir Vadai { 1161acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 116265ba8fb7SOr Gerlitz int err, attr_size = 0; 1163776b12b6SOr Gerlitz u32 flow_tag, action; 1164e3a2b7edSAmir Vadai struct mlx5e_tc_flow *flow; 1165c5bb1730SMaor Gottlieb struct mlx5_flow_spec *spec; 1166adb4c123SOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 116765ba8fb7SOr Gerlitz u8 flow_flags = 0; 1168e3a2b7edSAmir Vadai 116965ba8fb7SOr Gerlitz if (esw && esw->mode == SRIOV_OFFLOADS) { 117065ba8fb7SOr Gerlitz flow_flags = MLX5E_TC_FLOW_ESWITCH; 117165ba8fb7SOr Gerlitz attr_size = sizeof(struct mlx5_esw_flow_attr); 117265ba8fb7SOr Gerlitz } 1173776b12b6SOr Gerlitz 117465ba8fb7SOr Gerlitz flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); 1175c5bb1730SMaor Gottlieb spec = mlx5_vzalloc(sizeof(*spec)); 1176c5bb1730SMaor Gottlieb if (!spec || !flow) { 1177e3a2b7edSAmir Vadai err = -ENOMEM; 1178e3a2b7edSAmir Vadai goto err_free; 1179e3a2b7edSAmir Vadai } 1180e3a2b7edSAmir Vadai 1181e3a2b7edSAmir Vadai flow->cookie = f->cookie; 118265ba8fb7SOr Gerlitz flow->flags = flow_flags; 1183e3a2b7edSAmir Vadai 118465ba8fb7SOr Gerlitz err = parse_cls_flower(priv, flow, spec, f); 1185e3a2b7edSAmir Vadai if (err < 0) 1186e3a2b7edSAmir Vadai goto err_free; 
1187e3a2b7edSAmir Vadai 118865ba8fb7SOr Gerlitz if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 1189776b12b6SOr Gerlitz flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1); 1190a54e20b4SHadar Hen Zion err = parse_tc_fdb_actions(priv, f->exts, flow); 1191adb4c123SOr Gerlitz if (err < 0) 1192adb4c123SOr Gerlitz goto err_free; 1193776b12b6SOr Gerlitz flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr); 1194adb4c123SOr Gerlitz } else { 11955c40348cSOr Gerlitz err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag); 1196e3a2b7edSAmir Vadai if (err < 0) 1197e3a2b7edSAmir Vadai goto err_free; 11985c40348cSOr Gerlitz flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag); 1199adb4c123SOr Gerlitz } 1200adb4c123SOr Gerlitz 12015c40348cSOr Gerlitz if (IS_ERR(flow->rule)) { 12025c40348cSOr Gerlitz err = PTR_ERR(flow->rule); 12035e86397aSOr Gerlitz goto err_del_rule; 12045c40348cSOr Gerlitz } 12055c40348cSOr Gerlitz 1206e3a2b7edSAmir Vadai err = rhashtable_insert_fast(&tc->ht, &flow->node, 1207e3a2b7edSAmir Vadai tc->ht_params); 1208e3a2b7edSAmir Vadai if (err) 12095c40348cSOr Gerlitz goto err_del_rule; 1210e3a2b7edSAmir Vadai 1211e3a2b7edSAmir Vadai goto out; 1212e3a2b7edSAmir Vadai 12135c40348cSOr Gerlitz err_del_rule: 12145e86397aSOr Gerlitz mlx5e_tc_del_flow(priv, flow); 1215e3a2b7edSAmir Vadai 1216e3a2b7edSAmir Vadai err_free: 1217e3a2b7edSAmir Vadai kfree(flow); 1218e3a2b7edSAmir Vadai out: 1219c5bb1730SMaor Gottlieb kvfree(spec); 1220e3a2b7edSAmir Vadai return err; 1221e3a2b7edSAmir Vadai } 1222e3a2b7edSAmir Vadai 1223e3a2b7edSAmir Vadai int mlx5e_delete_flower(struct mlx5e_priv *priv, 1224e3a2b7edSAmir Vadai struct tc_cls_flower_offload *f) 1225e3a2b7edSAmir Vadai { 1226e3a2b7edSAmir Vadai struct mlx5e_tc_flow *flow; 1227acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 1228e3a2b7edSAmir Vadai 1229e3a2b7edSAmir Vadai flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 1230e3a2b7edSAmir Vadai tc->ht_params); 1231e3a2b7edSAmir Vadai if 
(!flow) 1232e3a2b7edSAmir Vadai return -EINVAL; 1233e3a2b7edSAmir Vadai 1234e3a2b7edSAmir Vadai rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); 1235e3a2b7edSAmir Vadai 1236961e8979SRoi Dayan mlx5e_tc_del_flow(priv, flow); 1237e3a2b7edSAmir Vadai 1238a54e20b4SHadar Hen Zion 1239e3a2b7edSAmir Vadai kfree(flow); 1240e3a2b7edSAmir Vadai 1241e3a2b7edSAmir Vadai return 0; 1242e3a2b7edSAmir Vadai } 1243e3a2b7edSAmir Vadai 1244aad7e08dSAmir Vadai int mlx5e_stats_flower(struct mlx5e_priv *priv, 1245aad7e08dSAmir Vadai struct tc_cls_flower_offload *f) 1246aad7e08dSAmir Vadai { 1247aad7e08dSAmir Vadai struct mlx5e_tc_table *tc = &priv->fs.tc; 1248aad7e08dSAmir Vadai struct mlx5e_tc_flow *flow; 1249aad7e08dSAmir Vadai struct tc_action *a; 1250aad7e08dSAmir Vadai struct mlx5_fc *counter; 125122dc13c8SWANG Cong LIST_HEAD(actions); 1252aad7e08dSAmir Vadai u64 bytes; 1253aad7e08dSAmir Vadai u64 packets; 1254aad7e08dSAmir Vadai u64 lastuse; 1255aad7e08dSAmir Vadai 1256aad7e08dSAmir Vadai flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 1257aad7e08dSAmir Vadai tc->ht_params); 1258aad7e08dSAmir Vadai if (!flow) 1259aad7e08dSAmir Vadai return -EINVAL; 1260aad7e08dSAmir Vadai 1261aad7e08dSAmir Vadai counter = mlx5_flow_rule_counter(flow->rule); 1262aad7e08dSAmir Vadai if (!counter) 1263aad7e08dSAmir Vadai return 0; 1264aad7e08dSAmir Vadai 1265aad7e08dSAmir Vadai mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 1266aad7e08dSAmir Vadai 1267fed06ee8SOr Gerlitz preempt_disable(); 1268fed06ee8SOr Gerlitz 126922dc13c8SWANG Cong tcf_exts_to_list(f->exts, &actions); 127022dc13c8SWANG Cong list_for_each_entry(a, &actions, list) 1271aad7e08dSAmir Vadai tcf_action_stats_update(a, bytes, packets, lastuse); 1272aad7e08dSAmir Vadai 1273fed06ee8SOr Gerlitz preempt_enable(); 1274fed06ee8SOr Gerlitz 1275aad7e08dSAmir Vadai return 0; 1276aad7e08dSAmir Vadai } 1277aad7e08dSAmir Vadai 1278e8f887acSAmir Vadai static const struct rhashtable_params mlx5e_tc_flow_ht_params = { 
1279e8f887acSAmir Vadai .head_offset = offsetof(struct mlx5e_tc_flow, node), 1280e8f887acSAmir Vadai .key_offset = offsetof(struct mlx5e_tc_flow, cookie), 1281e8f887acSAmir Vadai .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), 1282e8f887acSAmir Vadai .automatic_shrinking = true, 1283e8f887acSAmir Vadai }; 1284e8f887acSAmir Vadai 1285e8f887acSAmir Vadai int mlx5e_tc_init(struct mlx5e_priv *priv) 1286e8f887acSAmir Vadai { 1287acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 1288e8f887acSAmir Vadai 1289e8f887acSAmir Vadai tc->ht_params = mlx5e_tc_flow_ht_params; 1290e8f887acSAmir Vadai return rhashtable_init(&tc->ht, &tc->ht_params); 1291e8f887acSAmir Vadai } 1292e8f887acSAmir Vadai 1293e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg) 1294e8f887acSAmir Vadai { 1295e8f887acSAmir Vadai struct mlx5e_tc_flow *flow = ptr; 1296e8f887acSAmir Vadai struct mlx5e_priv *priv = arg; 1297e8f887acSAmir Vadai 1298961e8979SRoi Dayan mlx5e_tc_del_flow(priv, flow); 1299e8f887acSAmir Vadai kfree(flow); 1300e8f887acSAmir Vadai } 1301e8f887acSAmir Vadai 1302e8f887acSAmir Vadai void mlx5e_tc_cleanup(struct mlx5e_priv *priv) 1303e8f887acSAmir Vadai { 1304acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 1305e8f887acSAmir Vadai 1306e8f887acSAmir Vadai rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); 1307e8f887acSAmir Vadai 1308acff797cSMaor Gottlieb if (!IS_ERR_OR_NULL(tc->t)) { 1309acff797cSMaor Gottlieb mlx5_destroy_flow_table(tc->t); 1310acff797cSMaor Gottlieb tc->t = NULL; 1311e8f887acSAmir Vadai } 1312e8f887acSAmir Vadai } 1313