1e8f887acSAmir Vadai /* 2e8f887acSAmir Vadai * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3e8f887acSAmir Vadai * 4e8f887acSAmir Vadai * This software is available to you under a choice of one of two 5e8f887acSAmir Vadai * licenses. You may choose to be licensed under the terms of the GNU 6e8f887acSAmir Vadai * General Public License (GPL) Version 2, available from the file 7e8f887acSAmir Vadai * COPYING in the main directory of this source tree, or the 8e8f887acSAmir Vadai * OpenIB.org BSD license below: 9e8f887acSAmir Vadai * 10e8f887acSAmir Vadai * Redistribution and use in source and binary forms, with or 11e8f887acSAmir Vadai * without modification, are permitted provided that the following 12e8f887acSAmir Vadai * conditions are met: 13e8f887acSAmir Vadai * 14e8f887acSAmir Vadai * - Redistributions of source code must retain the above 15e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 16e8f887acSAmir Vadai * disclaimer. 17e8f887acSAmir Vadai * 18e8f887acSAmir Vadai * - Redistributions in binary form must reproduce the above 19e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 20e8f887acSAmir Vadai * disclaimer in the documentation and/or other materials 21e8f887acSAmir Vadai * provided with the distribution. 22e8f887acSAmir Vadai * 23e8f887acSAmir Vadai * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f887acSAmir Vadai * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f887acSAmir Vadai * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f887acSAmir Vadai * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f887acSAmir Vadai * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f887acSAmir Vadai * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f887acSAmir Vadai * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f887acSAmir Vadai * SOFTWARE. 
31e8f887acSAmir Vadai */ 32e8f887acSAmir Vadai 33e3a2b7edSAmir Vadai #include <net/flow_dissector.h> 34e3a2b7edSAmir Vadai #include <net/pkt_cls.h> 35e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h> 3612185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h> 37e8f887acSAmir Vadai #include <linux/mlx5/fs.h> 38e8f887acSAmir Vadai #include <linux/mlx5/device.h> 39e8f887acSAmir Vadai #include <linux/rhashtable.h> 4003a9d11eSOr Gerlitz #include <net/switchdev.h> 4103a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h> 42776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h> 43bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h> 44e8f887acSAmir Vadai #include "en.h" 45e8f887acSAmir Vadai #include "en_tc.h" 4603a9d11eSOr Gerlitz #include "eswitch.h" 47bbd00f7eSHadar Hen Zion #include "vxlan.h" 48e8f887acSAmir Vadai 49e8f887acSAmir Vadai struct mlx5e_tc_flow { 50e8f887acSAmir Vadai struct rhash_head node; 51e8f887acSAmir Vadai u64 cookie; 5274491de9SMark Bloch struct mlx5_flow_handle *rule; 53776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr; 54e8f887acSAmir Vadai }; 55e8f887acSAmir Vadai 56acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_ENTRIES 1024 57acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4 58e8f887acSAmir Vadai 5974491de9SMark Bloch static struct mlx5_flow_handle * 6074491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 61c5bb1730SMaor Gottlieb struct mlx5_flow_spec *spec, 62e8f887acSAmir Vadai u32 action, u32 flow_tag) 63e8f887acSAmir Vadai { 64aad7e08dSAmir Vadai struct mlx5_core_dev *dev = priv->mdev; 65aad7e08dSAmir Vadai struct mlx5_flow_destination dest = { 0 }; 6666958ed9SHadar Hen Zion struct mlx5_flow_act flow_act = { 6766958ed9SHadar Hen Zion .action = action, 6866958ed9SHadar Hen Zion .flow_tag = flow_tag, 6966958ed9SHadar Hen Zion .encap_id = 0, 7066958ed9SHadar Hen Zion }; 71aad7e08dSAmir Vadai struct mlx5_fc *counter = NULL; 7274491de9SMark Bloch struct mlx5_flow_handle *rule; 73e8f887acSAmir Vadai bool table_created 
= false; 74e8f887acSAmir Vadai 75aad7e08dSAmir Vadai if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 76aad7e08dSAmir Vadai dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 77aad7e08dSAmir Vadai dest.ft = priv->fs.vlan.ft.t; 7855130287SOr Gerlitz } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 79aad7e08dSAmir Vadai counter = mlx5_fc_create(dev, true); 80aad7e08dSAmir Vadai if (IS_ERR(counter)) 81aad7e08dSAmir Vadai return ERR_CAST(counter); 82aad7e08dSAmir Vadai 83aad7e08dSAmir Vadai dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 84aad7e08dSAmir Vadai dest.counter = counter; 85aad7e08dSAmir Vadai } 86aad7e08dSAmir Vadai 87acff797cSMaor Gottlieb if (IS_ERR_OR_NULL(priv->fs.tc.t)) { 88acff797cSMaor Gottlieb priv->fs.tc.t = 89acff797cSMaor Gottlieb mlx5_create_auto_grouped_flow_table(priv->fs.ns, 90acff797cSMaor Gottlieb MLX5E_TC_PRIO, 91acff797cSMaor Gottlieb MLX5E_TC_TABLE_NUM_ENTRIES, 92acff797cSMaor Gottlieb MLX5E_TC_TABLE_NUM_GROUPS, 93c9f1b073SHadar Hen Zion 0, 0); 94acff797cSMaor Gottlieb if (IS_ERR(priv->fs.tc.t)) { 95e8f887acSAmir Vadai netdev_err(priv->netdev, 96e8f887acSAmir Vadai "Failed to create tc offload table\n"); 97aad7e08dSAmir Vadai rule = ERR_CAST(priv->fs.tc.t); 98aad7e08dSAmir Vadai goto err_create_ft; 99e8f887acSAmir Vadai } 100e8f887acSAmir Vadai 101e8f887acSAmir Vadai table_created = true; 102e8f887acSAmir Vadai } 103e8f887acSAmir Vadai 104c5bb1730SMaor Gottlieb spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 10566958ed9SHadar Hen Zion rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1); 106e8f887acSAmir Vadai 107aad7e08dSAmir Vadai if (IS_ERR(rule)) 108aad7e08dSAmir Vadai goto err_add_rule; 109aad7e08dSAmir Vadai 110aad7e08dSAmir Vadai return rule; 111aad7e08dSAmir Vadai 112aad7e08dSAmir Vadai err_add_rule: 113aad7e08dSAmir Vadai if (table_created) { 114acff797cSMaor Gottlieb mlx5_destroy_flow_table(priv->fs.tc.t); 115acff797cSMaor Gottlieb priv->fs.tc.t = NULL; 116e8f887acSAmir Vadai } 117aad7e08dSAmir 
Vadai err_create_ft: 118aad7e08dSAmir Vadai mlx5_fc_destroy(dev, counter); 119e8f887acSAmir Vadai 120e8f887acSAmir Vadai return rule; 121e8f887acSAmir Vadai } 122e8f887acSAmir Vadai 12374491de9SMark Bloch static struct mlx5_flow_handle * 12474491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 125adb4c123SOr Gerlitz struct mlx5_flow_spec *spec, 126776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr) 127adb4c123SOr Gerlitz { 128adb4c123SOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1298b32580dSOr Gerlitz int err; 1308b32580dSOr Gerlitz 1318b32580dSOr Gerlitz err = mlx5_eswitch_add_vlan_action(esw, attr); 1328b32580dSOr Gerlitz if (err) 1338b32580dSOr Gerlitz return ERR_PTR(err); 134adb4c123SOr Gerlitz 135776b12b6SOr Gerlitz return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); 136adb4c123SOr Gerlitz } 137adb4c123SOr Gerlitz 138e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 13974491de9SMark Bloch struct mlx5_flow_handle *rule, 1408b32580dSOr Gerlitz struct mlx5_esw_flow_attr *attr) 141e8f887acSAmir Vadai { 1428b32580dSOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 143aad7e08dSAmir Vadai struct mlx5_fc *counter = NULL; 144aad7e08dSAmir Vadai 145aad7e08dSAmir Vadai counter = mlx5_flow_rule_counter(rule); 146aad7e08dSAmir Vadai 1478b32580dSOr Gerlitz if (esw && esw->mode == SRIOV_OFFLOADS) 1488b32580dSOr Gerlitz mlx5_eswitch_del_vlan_action(esw, attr); 1498b32580dSOr Gerlitz 15074491de9SMark Bloch mlx5_del_flow_rules(rule); 151e8f887acSAmir Vadai 152aad7e08dSAmir Vadai mlx5_fc_destroy(priv->mdev, counter); 153aad7e08dSAmir Vadai 1545c40348cSOr Gerlitz if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { 155acff797cSMaor Gottlieb mlx5_destroy_flow_table(priv->fs.tc.t); 156acff797cSMaor Gottlieb priv->fs.tc.t = NULL; 157e8f887acSAmir Vadai } 158e8f887acSAmir Vadai } 159e8f887acSAmir Vadai 160bbd00f7eSHadar Hen Zion static void parse_vxlan_attr(struct mlx5_flow_spec *spec, 161bbd00f7eSHadar Hen 
Zion struct tc_cls_flower_offload *f) 162bbd00f7eSHadar Hen Zion { 163bbd00f7eSHadar Hen Zion void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 164bbd00f7eSHadar Hen Zion outer_headers); 165bbd00f7eSHadar Hen Zion void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 166bbd00f7eSHadar Hen Zion outer_headers); 167bbd00f7eSHadar Hen Zion void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 168bbd00f7eSHadar Hen Zion misc_parameters); 169bbd00f7eSHadar Hen Zion void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 170bbd00f7eSHadar Hen Zion misc_parameters); 171bbd00f7eSHadar Hen Zion 172bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); 173bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 174bbd00f7eSHadar Hen Zion 175bbd00f7eSHadar Hen Zion if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 176bbd00f7eSHadar Hen Zion struct flow_dissector_key_keyid *key = 177bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 178bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_KEYID, 179bbd00f7eSHadar Hen Zion f->key); 180bbd00f7eSHadar Hen Zion struct flow_dissector_key_keyid *mask = 181bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 182bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_KEYID, 183bbd00f7eSHadar Hen Zion f->mask); 184bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 185bbd00f7eSHadar Hen Zion be32_to_cpu(mask->keyid)); 186bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, 187bbd00f7eSHadar Hen Zion be32_to_cpu(key->keyid)); 188bbd00f7eSHadar Hen Zion } 189bbd00f7eSHadar Hen Zion } 190bbd00f7eSHadar Hen Zion 191bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv, 192bbd00f7eSHadar Hen Zion struct mlx5_flow_spec *spec, 193bbd00f7eSHadar Hen Zion struct tc_cls_flower_offload *f) 194bbd00f7eSHadar Hen Zion { 195bbd00f7eSHadar 
Hen Zion void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 196bbd00f7eSHadar Hen Zion outer_headers); 197bbd00f7eSHadar Hen Zion void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 198bbd00f7eSHadar Hen Zion outer_headers); 199bbd00f7eSHadar Hen Zion 200bbd00f7eSHadar Hen Zion if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { 201bbd00f7eSHadar Hen Zion struct flow_dissector_key_ports *key = 202bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 203bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_PORTS, 204bbd00f7eSHadar Hen Zion f->key); 205bbd00f7eSHadar Hen Zion struct flow_dissector_key_ports *mask = 206bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 207bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_PORTS, 208bbd00f7eSHadar Hen Zion f->mask); 209bbd00f7eSHadar Hen Zion 210bbd00f7eSHadar Hen Zion /* Full udp dst port must be given */ 211bbd00f7eSHadar Hen Zion if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 212bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 213bbd00f7eSHadar Hen Zion 214bbd00f7eSHadar Hen Zion /* udp src port isn't supported */ 215bbd00f7eSHadar Hen Zion if (memchr_inv(&mask->src, 0, sizeof(mask->src))) 216bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 217bbd00f7eSHadar Hen Zion 218bbd00f7eSHadar Hen Zion if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && 219bbd00f7eSHadar Hen Zion MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 220bbd00f7eSHadar Hen Zion parse_vxlan_attr(spec, f); 221bbd00f7eSHadar Hen Zion else 222bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 223bbd00f7eSHadar Hen Zion 224bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 225bbd00f7eSHadar Hen Zion udp_dport, ntohs(mask->dst)); 226bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 227bbd00f7eSHadar Hen Zion udp_dport, ntohs(key->dst)); 228bbd00f7eSHadar Hen Zion 229bbd00f7eSHadar Hen Zion } else { /* udp dst port must be given */ 230bbd00f7eSHadar Hen Zion return 
-EOPNOTSUPP; 231bbd00f7eSHadar Hen Zion } 232bbd00f7eSHadar Hen Zion 233bbd00f7eSHadar Hen Zion if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { 234bbd00f7eSHadar Hen Zion struct flow_dissector_key_ipv4_addrs *key = 235bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 236bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 237bbd00f7eSHadar Hen Zion f->key); 238bbd00f7eSHadar Hen Zion struct flow_dissector_key_ipv4_addrs *mask = 239bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 240bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 241bbd00f7eSHadar Hen Zion f->mask); 242bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 243bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 244bbd00f7eSHadar Hen Zion ntohl(mask->src)); 245bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 246bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 247bbd00f7eSHadar Hen Zion ntohl(key->src)); 248bbd00f7eSHadar Hen Zion 249bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 250bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 251bbd00f7eSHadar Hen Zion ntohl(mask->dst)); 252bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 253bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 254bbd00f7eSHadar Hen Zion ntohl(key->dst)); 255bbd00f7eSHadar Hen Zion } 256bbd00f7eSHadar Hen Zion 257bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 258bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); 259bbd00f7eSHadar Hen Zion 260bbd00f7eSHadar Hen Zion /* Enforce DMAC when offloading incoming tunneled flows. 261bbd00f7eSHadar Hen Zion * Flow counters require a match on the DMAC. 
262bbd00f7eSHadar Hen Zion */ 263bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 264bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 265bbd00f7eSHadar Hen Zion ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 266bbd00f7eSHadar Hen Zion dmac_47_16), priv->netdev->dev_addr); 267bbd00f7eSHadar Hen Zion 268bbd00f7eSHadar Hen Zion /* let software handle IP fragments */ 269bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 270bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 271bbd00f7eSHadar Hen Zion 272bbd00f7eSHadar Hen Zion return 0; 273bbd00f7eSHadar Hen Zion } 274bbd00f7eSHadar Hen Zion 275c5bb1730SMaor Gottlieb static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, 276e3a2b7edSAmir Vadai struct tc_cls_flower_offload *f) 277e3a2b7edSAmir Vadai { 278c5bb1730SMaor Gottlieb void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 279c5bb1730SMaor Gottlieb outer_headers); 280c5bb1730SMaor Gottlieb void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 281c5bb1730SMaor Gottlieb outer_headers); 282e3a2b7edSAmir Vadai u16 addr_type = 0; 283e3a2b7edSAmir Vadai u8 ip_proto = 0; 284e3a2b7edSAmir Vadai 285e3a2b7edSAmir Vadai if (f->dissector->used_keys & 286e3a2b7edSAmir Vadai ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 287e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_BASIC) | 288e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 289095b6cfdSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_VLAN) | 290e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 291e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 292bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_PORTS) | 293bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | 294bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 295bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | 296bbd00f7eSHadar Hen Zion 
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | 297bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) { 298e3a2b7edSAmir Vadai netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", 299e3a2b7edSAmir Vadai f->dissector->used_keys); 300e3a2b7edSAmir Vadai return -EOPNOTSUPP; 301e3a2b7edSAmir Vadai } 302e3a2b7edSAmir Vadai 303bbd00f7eSHadar Hen Zion if ((dissector_uses_key(f->dissector, 304bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || 305bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) || 306bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) && 307bbd00f7eSHadar Hen Zion dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 308bbd00f7eSHadar Hen Zion struct flow_dissector_key_control *key = 309bbd00f7eSHadar Hen Zion skb_flow_dissector_target(f->dissector, 310bbd00f7eSHadar Hen Zion FLOW_DISSECTOR_KEY_ENC_CONTROL, 311bbd00f7eSHadar Hen Zion f->key); 312bbd00f7eSHadar Hen Zion switch (key->addr_type) { 313bbd00f7eSHadar Hen Zion case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 314bbd00f7eSHadar Hen Zion if (parse_tunnel_attr(priv, spec, f)) 315bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 316bbd00f7eSHadar Hen Zion break; 317bbd00f7eSHadar Hen Zion default: 318bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 319bbd00f7eSHadar Hen Zion } 320bbd00f7eSHadar Hen Zion 321bbd00f7eSHadar Hen Zion /* In decap flow, header pointers should point to the inner 322bbd00f7eSHadar Hen Zion * headers, outer header were already set by parse_tunnel_attr 323bbd00f7eSHadar Hen Zion */ 324bbd00f7eSHadar Hen Zion headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 325bbd00f7eSHadar Hen Zion inner_headers); 326bbd00f7eSHadar Hen Zion headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 327bbd00f7eSHadar Hen Zion inner_headers); 328bbd00f7eSHadar Hen Zion } 329bbd00f7eSHadar Hen Zion 330e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { 
331e3a2b7edSAmir Vadai struct flow_dissector_key_control *key = 332e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 3331dbd0d37SHadar Hen Zion FLOW_DISSECTOR_KEY_CONTROL, 334e3a2b7edSAmir Vadai f->key); 335e3a2b7edSAmir Vadai addr_type = key->addr_type; 336e3a2b7edSAmir Vadai } 337e3a2b7edSAmir Vadai 338e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 339e3a2b7edSAmir Vadai struct flow_dissector_key_basic *key = 340e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 341e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_BASIC, 342e3a2b7edSAmir Vadai f->key); 343e3a2b7edSAmir Vadai struct flow_dissector_key_basic *mask = 344e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 345e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_BASIC, 346e3a2b7edSAmir Vadai f->mask); 347e3a2b7edSAmir Vadai ip_proto = key->ip_proto; 348e3a2b7edSAmir Vadai 349e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 350e3a2b7edSAmir Vadai ntohs(mask->n_proto)); 351e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 352e3a2b7edSAmir Vadai ntohs(key->n_proto)); 353e3a2b7edSAmir Vadai 354e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 355e3a2b7edSAmir Vadai mask->ip_proto); 356e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 357e3a2b7edSAmir Vadai key->ip_proto); 358e3a2b7edSAmir Vadai } 359e3a2b7edSAmir Vadai 360e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 361e3a2b7edSAmir Vadai struct flow_dissector_key_eth_addrs *key = 362e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 363e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_ETH_ADDRS, 364e3a2b7edSAmir Vadai f->key); 365e3a2b7edSAmir Vadai struct flow_dissector_key_eth_addrs *mask = 366e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 367e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_ETH_ADDRS, 368e3a2b7edSAmir Vadai f->mask); 369e3a2b7edSAmir Vadai 
370e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 371e3a2b7edSAmir Vadai dmac_47_16), 372e3a2b7edSAmir Vadai mask->dst); 373e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 374e3a2b7edSAmir Vadai dmac_47_16), 375e3a2b7edSAmir Vadai key->dst); 376e3a2b7edSAmir Vadai 377e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 378e3a2b7edSAmir Vadai smac_47_16), 379e3a2b7edSAmir Vadai mask->src); 380e3a2b7edSAmir Vadai ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 381e3a2b7edSAmir Vadai smac_47_16), 382e3a2b7edSAmir Vadai key->src); 383e3a2b7edSAmir Vadai } 384e3a2b7edSAmir Vadai 385095b6cfdSOr Gerlitz if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 386095b6cfdSOr Gerlitz struct flow_dissector_key_vlan *key = 387095b6cfdSOr Gerlitz skb_flow_dissector_target(f->dissector, 388095b6cfdSOr Gerlitz FLOW_DISSECTOR_KEY_VLAN, 389095b6cfdSOr Gerlitz f->key); 390095b6cfdSOr Gerlitz struct flow_dissector_key_vlan *mask = 391095b6cfdSOr Gerlitz skb_flow_dissector_target(f->dissector, 392095b6cfdSOr Gerlitz FLOW_DISSECTOR_KEY_VLAN, 393095b6cfdSOr Gerlitz f->mask); 394095b6cfdSOr Gerlitz if (mask->vlan_id) { 395095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); 396095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); 397095b6cfdSOr Gerlitz 398095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 399095b6cfdSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 400095b6cfdSOr Gerlitz } 401095b6cfdSOr Gerlitz } 402095b6cfdSOr Gerlitz 403e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 404e3a2b7edSAmir Vadai struct flow_dissector_key_ipv4_addrs *key = 405e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 406e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV4_ADDRS, 407e3a2b7edSAmir Vadai f->key); 408e3a2b7edSAmir Vadai struct 
flow_dissector_key_ipv4_addrs *mask = 409e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 410e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV4_ADDRS, 411e3a2b7edSAmir Vadai f->mask); 412e3a2b7edSAmir Vadai 413e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 414e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 415e3a2b7edSAmir Vadai &mask->src, sizeof(mask->src)); 416e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 417e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 418e3a2b7edSAmir Vadai &key->src, sizeof(key->src)); 419e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 420e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 421e3a2b7edSAmir Vadai &mask->dst, sizeof(mask->dst)); 422e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 423e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 424e3a2b7edSAmir Vadai &key->dst, sizeof(key->dst)); 425e3a2b7edSAmir Vadai } 426e3a2b7edSAmir Vadai 427e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 428e3a2b7edSAmir Vadai struct flow_dissector_key_ipv6_addrs *key = 429e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 430e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV6_ADDRS, 431e3a2b7edSAmir Vadai f->key); 432e3a2b7edSAmir Vadai struct flow_dissector_key_ipv6_addrs *mask = 433e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 434e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_IPV6_ADDRS, 435e3a2b7edSAmir Vadai f->mask); 436e3a2b7edSAmir Vadai 437e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 438e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 439e3a2b7edSAmir Vadai &mask->src, sizeof(mask->src)); 440e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 441e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 442e3a2b7edSAmir Vadai &key->src, sizeof(key->src)); 443e3a2b7edSAmir Vadai 444e3a2b7edSAmir Vadai 
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 445e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 446e3a2b7edSAmir Vadai &mask->dst, sizeof(mask->dst)); 447e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 448e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 449e3a2b7edSAmir Vadai &key->dst, sizeof(key->dst)); 450e3a2b7edSAmir Vadai } 451e3a2b7edSAmir Vadai 452e3a2b7edSAmir Vadai if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { 453e3a2b7edSAmir Vadai struct flow_dissector_key_ports *key = 454e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 455e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_PORTS, 456e3a2b7edSAmir Vadai f->key); 457e3a2b7edSAmir Vadai struct flow_dissector_key_ports *mask = 458e3a2b7edSAmir Vadai skb_flow_dissector_target(f->dissector, 459e3a2b7edSAmir Vadai FLOW_DISSECTOR_KEY_PORTS, 460e3a2b7edSAmir Vadai f->mask); 461e3a2b7edSAmir Vadai switch (ip_proto) { 462e3a2b7edSAmir Vadai case IPPROTO_TCP: 463e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 464e3a2b7edSAmir Vadai tcp_sport, ntohs(mask->src)); 465e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 466e3a2b7edSAmir Vadai tcp_sport, ntohs(key->src)); 467e3a2b7edSAmir Vadai 468e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 469e3a2b7edSAmir Vadai tcp_dport, ntohs(mask->dst)); 470e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 471e3a2b7edSAmir Vadai tcp_dport, ntohs(key->dst)); 472e3a2b7edSAmir Vadai break; 473e3a2b7edSAmir Vadai 474e3a2b7edSAmir Vadai case IPPROTO_UDP: 475e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 476e3a2b7edSAmir Vadai udp_sport, ntohs(mask->src)); 477e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 478e3a2b7edSAmir Vadai udp_sport, ntohs(key->src)); 479e3a2b7edSAmir Vadai 480e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 481e3a2b7edSAmir Vadai udp_dport, ntohs(mask->dst)); 482e3a2b7edSAmir Vadai 
MLX5_SET(fte_match_set_lyr_2_4, headers_v, 483e3a2b7edSAmir Vadai udp_dport, ntohs(key->dst)); 484e3a2b7edSAmir Vadai break; 485e3a2b7edSAmir Vadai default: 486e3a2b7edSAmir Vadai netdev_err(priv->netdev, 487e3a2b7edSAmir Vadai "Only UDP and TCP transport are supported\n"); 488e3a2b7edSAmir Vadai return -EINVAL; 489e3a2b7edSAmir Vadai } 490e3a2b7edSAmir Vadai } 491e3a2b7edSAmir Vadai 492e3a2b7edSAmir Vadai return 0; 493e3a2b7edSAmir Vadai } 494e3a2b7edSAmir Vadai 4955c40348cSOr Gerlitz static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 496e3a2b7edSAmir Vadai u32 *action, u32 *flow_tag) 497e3a2b7edSAmir Vadai { 498e3a2b7edSAmir Vadai const struct tc_action *a; 49922dc13c8SWANG Cong LIST_HEAD(actions); 500e3a2b7edSAmir Vadai 501e3a2b7edSAmir Vadai if (tc_no_actions(exts)) 502e3a2b7edSAmir Vadai return -EINVAL; 503e3a2b7edSAmir Vadai 504e3a2b7edSAmir Vadai *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 505e3a2b7edSAmir Vadai *action = 0; 506e3a2b7edSAmir Vadai 50722dc13c8SWANG Cong tcf_exts_to_list(exts, &actions); 50822dc13c8SWANG Cong list_for_each_entry(a, &actions, list) { 509e3a2b7edSAmir Vadai /* Only support a single action per rule */ 510e3a2b7edSAmir Vadai if (*action) 511e3a2b7edSAmir Vadai return -EINVAL; 512e3a2b7edSAmir Vadai 513e3a2b7edSAmir Vadai if (is_tcf_gact_shot(a)) { 514e3a2b7edSAmir Vadai *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 515aad7e08dSAmir Vadai if (MLX5_CAP_FLOWTABLE(priv->mdev, 516aad7e08dSAmir Vadai flow_table_properties_nic_receive.flow_counter)) 517aad7e08dSAmir Vadai *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 518e3a2b7edSAmir Vadai continue; 519e3a2b7edSAmir Vadai } 520e3a2b7edSAmir Vadai 521e3a2b7edSAmir Vadai if (is_tcf_skbedit_mark(a)) { 522e3a2b7edSAmir Vadai u32 mark = tcf_skbedit_mark(a); 523e3a2b7edSAmir Vadai 524e3a2b7edSAmir Vadai if (mark & ~MLX5E_TC_FLOW_ID_MASK) { 525e3a2b7edSAmir Vadai netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", 526e3a2b7edSAmir Vadai mark); 
527e3a2b7edSAmir Vadai return -EINVAL; 528e3a2b7edSAmir Vadai } 529e3a2b7edSAmir Vadai 530e3a2b7edSAmir Vadai *flow_tag = mark; 531e3a2b7edSAmir Vadai *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 532e3a2b7edSAmir Vadai continue; 533e3a2b7edSAmir Vadai } 534e3a2b7edSAmir Vadai 535e3a2b7edSAmir Vadai return -EINVAL; 536e3a2b7edSAmir Vadai } 537e3a2b7edSAmir Vadai 538e3a2b7edSAmir Vadai return 0; 539e3a2b7edSAmir Vadai } 540e3a2b7edSAmir Vadai 54103a9d11eSOr Gerlitz static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 542776b12b6SOr Gerlitz struct mlx5_esw_flow_attr *attr) 54303a9d11eSOr Gerlitz { 54403a9d11eSOr Gerlitz const struct tc_action *a; 54522dc13c8SWANG Cong LIST_HEAD(actions); 54603a9d11eSOr Gerlitz 54703a9d11eSOr Gerlitz if (tc_no_actions(exts)) 54803a9d11eSOr Gerlitz return -EINVAL; 54903a9d11eSOr Gerlitz 550776b12b6SOr Gerlitz memset(attr, 0, sizeof(*attr)); 551776b12b6SOr Gerlitz attr->in_rep = priv->ppriv; 55203a9d11eSOr Gerlitz 55322dc13c8SWANG Cong tcf_exts_to_list(exts, &actions); 55422dc13c8SWANG Cong list_for_each_entry(a, &actions, list) { 55503a9d11eSOr Gerlitz if (is_tcf_gact_shot(a)) { 5568b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | 55703a9d11eSOr Gerlitz MLX5_FLOW_CONTEXT_ACTION_COUNT; 55803a9d11eSOr Gerlitz continue; 55903a9d11eSOr Gerlitz } 56003a9d11eSOr Gerlitz 5615724b8b5SShmulik Ladkani if (is_tcf_mirred_egress_redirect(a)) { 56203a9d11eSOr Gerlitz int ifindex = tcf_mirred_ifindex(a); 56303a9d11eSOr Gerlitz struct net_device *out_dev; 56403a9d11eSOr Gerlitz struct mlx5e_priv *out_priv; 56503a9d11eSOr Gerlitz 56603a9d11eSOr Gerlitz out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); 56703a9d11eSOr Gerlitz 56803a9d11eSOr Gerlitz if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) { 56903a9d11eSOr Gerlitz pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", 57003a9d11eSOr Gerlitz priv->netdev->name, out_dev->name); 57103a9d11eSOr Gerlitz return 
-EINVAL; 57203a9d11eSOr Gerlitz } 57303a9d11eSOr Gerlitz 574e37a79e5SMark Bloch attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 575e37a79e5SMark Bloch MLX5_FLOW_CONTEXT_ACTION_COUNT; 57603a9d11eSOr Gerlitz out_priv = netdev_priv(out_dev); 577776b12b6SOr Gerlitz attr->out_rep = out_priv->ppriv; 57803a9d11eSOr Gerlitz continue; 57903a9d11eSOr Gerlitz } 58003a9d11eSOr Gerlitz 5818b32580dSOr Gerlitz if (is_tcf_vlan(a)) { 5828b32580dSOr Gerlitz if (tcf_vlan_action(a) == VLAN_F_POP) { 5838b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 5848b32580dSOr Gerlitz } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { 5858b32580dSOr Gerlitz if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) 5868b32580dSOr Gerlitz return -EOPNOTSUPP; 5878b32580dSOr Gerlitz 5888b32580dSOr Gerlitz attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 5898b32580dSOr Gerlitz attr->vlan = tcf_vlan_push_vid(a); 5908b32580dSOr Gerlitz } 5918b32580dSOr Gerlitz continue; 5928b32580dSOr Gerlitz } 5938b32580dSOr Gerlitz 594bbd00f7eSHadar Hen Zion if (is_tcf_tunnel_release(a)) { 595bbd00f7eSHadar Hen Zion attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 596bbd00f7eSHadar Hen Zion continue; 597bbd00f7eSHadar Hen Zion } 598bbd00f7eSHadar Hen Zion 59903a9d11eSOr Gerlitz return -EINVAL; 60003a9d11eSOr Gerlitz } 60103a9d11eSOr Gerlitz return 0; 60203a9d11eSOr Gerlitz } 60303a9d11eSOr Gerlitz 604e3a2b7edSAmir Vadai int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, 605e3a2b7edSAmir Vadai struct tc_cls_flower_offload *f) 606e3a2b7edSAmir Vadai { 607acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 608e3a2b7edSAmir Vadai int err = 0; 609776b12b6SOr Gerlitz bool fdb_flow = false; 610776b12b6SOr Gerlitz u32 flow_tag, action; 611e3a2b7edSAmir Vadai struct mlx5e_tc_flow *flow; 612c5bb1730SMaor Gottlieb struct mlx5_flow_spec *spec; 61374491de9SMark Bloch struct mlx5_flow_handle *old = NULL; 614d0debb76SArnd Bergmann struct mlx5_esw_flow_attr *old_attr = NULL; 
615adb4c123SOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 616e3a2b7edSAmir Vadai 617776b12b6SOr Gerlitz if (esw && esw->mode == SRIOV_OFFLOADS) 618776b12b6SOr Gerlitz fdb_flow = true; 619776b12b6SOr Gerlitz 620e3a2b7edSAmir Vadai flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 621e3a2b7edSAmir Vadai tc->ht_params); 622776b12b6SOr Gerlitz if (flow) { 623e3a2b7edSAmir Vadai old = flow->rule; 6248b32580dSOr Gerlitz old_attr = flow->attr; 625776b12b6SOr Gerlitz } else { 626776b12b6SOr Gerlitz if (fdb_flow) 627776b12b6SOr Gerlitz flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr), 628776b12b6SOr Gerlitz GFP_KERNEL); 629e3a2b7edSAmir Vadai else 630e3a2b7edSAmir Vadai flow = kzalloc(sizeof(*flow), GFP_KERNEL); 631776b12b6SOr Gerlitz } 632e3a2b7edSAmir Vadai 633c5bb1730SMaor Gottlieb spec = mlx5_vzalloc(sizeof(*spec)); 634c5bb1730SMaor Gottlieb if (!spec || !flow) { 635e3a2b7edSAmir Vadai err = -ENOMEM; 636e3a2b7edSAmir Vadai goto err_free; 637e3a2b7edSAmir Vadai } 638e3a2b7edSAmir Vadai 639e3a2b7edSAmir Vadai flow->cookie = f->cookie; 640e3a2b7edSAmir Vadai 641c5bb1730SMaor Gottlieb err = parse_cls_flower(priv, spec, f); 642e3a2b7edSAmir Vadai if (err < 0) 643e3a2b7edSAmir Vadai goto err_free; 644e3a2b7edSAmir Vadai 645776b12b6SOr Gerlitz if (fdb_flow) { 646776b12b6SOr Gerlitz flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1); 647776b12b6SOr Gerlitz err = parse_tc_fdb_actions(priv, f->exts, flow->attr); 648adb4c123SOr Gerlitz if (err < 0) 649adb4c123SOr Gerlitz goto err_free; 650776b12b6SOr Gerlitz flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr); 651adb4c123SOr Gerlitz } else { 6525c40348cSOr Gerlitz err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag); 653e3a2b7edSAmir Vadai if (err < 0) 654e3a2b7edSAmir Vadai goto err_free; 6555c40348cSOr Gerlitz flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag); 656adb4c123SOr Gerlitz } 657adb4c123SOr Gerlitz 6585c40348cSOr Gerlitz if (IS_ERR(flow->rule)) { 
/* mlx5e_configure_flower() epilogue: if rule creation failed, unwrap the
 * ERR_PTR and free; on success insert the flow into tc->ht keyed by
 * cookie (deleting the just-created HW rule if the insert fails), then
 * tear down the replaced rule (old/old_attr) only after the new one is
 * live.  goto-based cleanup ladder: err_del_rule -> err_free -> out;
 * 'if (!old) kfree(flow)' keeps a reused flow struct alive on failure,
 * and kvfree(spec) runs on every exit path since the spec is only
 * needed at rule-creation time.
 *
 * mlx5e_delete_flower(): look up the flow by cookie, unhash it, delete
 * the HW rule via mlx5e_tc_del_flow() and free the flow; -EINVAL when
 * the cookie is unknown.
 *
 * mlx5e_stats_flower() begins at original line 704 (declarations
 * continue on the next physical line). */
6595c40348cSOr Gerlitz err = PTR_ERR(flow->rule); 6605c40348cSOr Gerlitz goto err_free; 6615c40348cSOr Gerlitz } 6625c40348cSOr Gerlitz 663e3a2b7edSAmir Vadai err = rhashtable_insert_fast(&tc->ht, &flow->node, 664e3a2b7edSAmir Vadai tc->ht_params); 665e3a2b7edSAmir Vadai if (err) 6665c40348cSOr Gerlitz goto err_del_rule; 667e3a2b7edSAmir Vadai 668e3a2b7edSAmir Vadai if (old) 6698b32580dSOr Gerlitz mlx5e_tc_del_flow(priv, old, old_attr); 670e3a2b7edSAmir Vadai 671e3a2b7edSAmir Vadai goto out; 672e3a2b7edSAmir Vadai 6735c40348cSOr Gerlitz err_del_rule: 67474491de9SMark Bloch mlx5_del_flow_rules(flow->rule); 675e3a2b7edSAmir Vadai 676e3a2b7edSAmir Vadai err_free: 677e3a2b7edSAmir Vadai if (!old) 678e3a2b7edSAmir Vadai kfree(flow); 679e3a2b7edSAmir Vadai out: 680c5bb1730SMaor Gottlieb kvfree(spec); 681e3a2b7edSAmir Vadai return err; 682e3a2b7edSAmir Vadai } 683e3a2b7edSAmir Vadai 684e3a2b7edSAmir Vadai int mlx5e_delete_flower(struct mlx5e_priv *priv, 685e3a2b7edSAmir Vadai struct tc_cls_flower_offload *f) 686e3a2b7edSAmir Vadai { 687e3a2b7edSAmir Vadai struct mlx5e_tc_flow *flow; 688acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 689e3a2b7edSAmir Vadai 690e3a2b7edSAmir Vadai flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 691e3a2b7edSAmir Vadai tc->ht_params); 692e3a2b7edSAmir Vadai if (!flow) 693e3a2b7edSAmir Vadai return -EINVAL; 694e3a2b7edSAmir Vadai 695e3a2b7edSAmir Vadai rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); 696e3a2b7edSAmir Vadai 6978b32580dSOr Gerlitz mlx5e_tc_del_flow(priv, flow->rule, flow->attr); 698e3a2b7edSAmir Vadai 699e3a2b7edSAmir Vadai kfree(flow); 700e3a2b7edSAmir Vadai 701e3a2b7edSAmir Vadai return 0; 702e3a2b7edSAmir Vadai } 703e3a2b7edSAmir Vadai 704aad7e08dSAmir Vadai int mlx5e_stats_flower(struct mlx5e_priv *priv, 705aad7e08dSAmir Vadai struct tc_cls_flower_offload *f) 706aad7e08dSAmir Vadai { 707aad7e08dSAmir Vadai struct mlx5e_tc_table *tc = &priv->fs.tc; 708aad7e08dSAmir Vadai struct mlx5e_tc_flow
/* mlx5e_stats_flower() body: look up the offloaded flow by cookie
 * (-EINVAL if unknown), fetch its flow counter — returning 0 quietly
 * when the rule has none — read the cached bytes/packets/lastuse
 * snapshot via mlx5_fc_query_cached(), and push those numbers into
 * every tc action attached to the classifier so software-side stats
 * stay in sync with the hardware counter.
 *
 * mlx5e_tc_flow_ht_params: rhashtable layout for tc->ht — node linkage
 * at flow->node, key is the u64 cookie, shrinking enabled.
 *
 * mlx5e_tc_init(): install those params and initialise the flow
 * hashtable; returns rhashtable_init()'s result.
 *
 * _mlx5e_tc_del_flow(): per-entry destructor callback (signature
 * dictated by rhashtable_free_and_destroy: void *entry, void *arg);
 * body continues on the next physical line. */
*flow; 709aad7e08dSAmir Vadai struct tc_action *a; 710aad7e08dSAmir Vadai struct mlx5_fc *counter; 71122dc13c8SWANG Cong LIST_HEAD(actions); 712aad7e08dSAmir Vadai u64 bytes; 713aad7e08dSAmir Vadai u64 packets; 714aad7e08dSAmir Vadai u64 lastuse; 715aad7e08dSAmir Vadai 716aad7e08dSAmir Vadai flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 717aad7e08dSAmir Vadai tc->ht_params); 718aad7e08dSAmir Vadai if (!flow) 719aad7e08dSAmir Vadai return -EINVAL; 720aad7e08dSAmir Vadai 721aad7e08dSAmir Vadai counter = mlx5_flow_rule_counter(flow->rule); 722aad7e08dSAmir Vadai if (!counter) 723aad7e08dSAmir Vadai return 0; 724aad7e08dSAmir Vadai 725aad7e08dSAmir Vadai mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 726aad7e08dSAmir Vadai 72722dc13c8SWANG Cong tcf_exts_to_list(f->exts, &actions); 72822dc13c8SWANG Cong list_for_each_entry(a, &actions, list) 729aad7e08dSAmir Vadai tcf_action_stats_update(a, bytes, packets, lastuse); 730aad7e08dSAmir Vadai 731aad7e08dSAmir Vadai return 0; 732aad7e08dSAmir Vadai } 733aad7e08dSAmir Vadai 734e8f887acSAmir Vadai static const struct rhashtable_params mlx5e_tc_flow_ht_params = { 735e8f887acSAmir Vadai .head_offset = offsetof(struct mlx5e_tc_flow, node), 736e8f887acSAmir Vadai .key_offset = offsetof(struct mlx5e_tc_flow, cookie), 737e8f887acSAmir Vadai .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), 738e8f887acSAmir Vadai .automatic_shrinking = true, 739e8f887acSAmir Vadai }; 740e8f887acSAmir Vadai 741e8f887acSAmir Vadai int mlx5e_tc_init(struct mlx5e_priv *priv) 742e8f887acSAmir Vadai { 743acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 744e8f887acSAmir Vadai 745e8f887acSAmir Vadai tc->ht_params = mlx5e_tc_flow_ht_params; 746e8f887acSAmir Vadai return rhashtable_init(&tc->ht, &tc->ht_params); 747e8f887acSAmir Vadai } 748e8f887acSAmir Vadai 749e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg) 750e8f887acSAmir Vadai { 751e8f887acSAmir Vadai struct mlx5e_tc_flow *flow = ptr;
/* End of _mlx5e_tc_del_flow(): recover the priv pointer from the opaque
 * arg, delete the flow's hardware rule, and free the entry — this runs
 * once per remaining hashtable entry during teardown.
 *
 * mlx5e_tc_cleanup(): driver-unload path.  Destroys tc->ht, draining
 * every still-offloaded flow through _mlx5e_tc_del_flow above, then
 * destroys the TC flow table if one was created (tc->t is created
 * lazily elsewhere, hence the IS_ERR_OR_NULL guard) and NULLs the
 * pointer so a repeat cleanup is safe. */
752e8f887acSAmir Vadai struct mlx5e_priv *priv = arg; 753e8f887acSAmir Vadai 7548b32580dSOr Gerlitz mlx5e_tc_del_flow(priv, flow->rule, flow->attr); 755e8f887acSAmir Vadai kfree(flow); 756e8f887acSAmir Vadai } 757e8f887acSAmir Vadai 758e8f887acSAmir Vadai void mlx5e_tc_cleanup(struct mlx5e_priv *priv) 759e8f887acSAmir Vadai { 760acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 761e8f887acSAmir Vadai 762e8f887acSAmir Vadai rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); 763e8f887acSAmir Vadai 764acff797cSMaor Gottlieb if (!IS_ERR_OR_NULL(tc->t)) { 765acff797cSMaor Gottlieb mlx5_destroy_flow_table(tc->t); 766acff797cSMaor Gottlieb tc->t = NULL; 767e8f887acSAmir Vadai } 768e8f887acSAmir Vadai } 769