1e8f887acSAmir Vadai /* 2e8f887acSAmir Vadai * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3e8f887acSAmir Vadai * 4e8f887acSAmir Vadai * This software is available to you under a choice of one of two 5e8f887acSAmir Vadai * licenses. You may choose to be licensed under the terms of the GNU 6e8f887acSAmir Vadai * General Public License (GPL) Version 2, available from the file 7e8f887acSAmir Vadai * COPYING in the main directory of this source tree, or the 8e8f887acSAmir Vadai * OpenIB.org BSD license below: 9e8f887acSAmir Vadai * 10e8f887acSAmir Vadai * Redistribution and use in source and binary forms, with or 11e8f887acSAmir Vadai * without modification, are permitted provided that the following 12e8f887acSAmir Vadai * conditions are met: 13e8f887acSAmir Vadai * 14e8f887acSAmir Vadai * - Redistributions of source code must retain the above 15e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 16e8f887acSAmir Vadai * disclaimer. 17e8f887acSAmir Vadai * 18e8f887acSAmir Vadai * - Redistributions in binary form must reproduce the above 19e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 20e8f887acSAmir Vadai * disclaimer in the documentation and/or other materials 21e8f887acSAmir Vadai * provided with the distribution. 22e8f887acSAmir Vadai * 23e8f887acSAmir Vadai * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f887acSAmir Vadai * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f887acSAmir Vadai * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f887acSAmir Vadai * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f887acSAmir Vadai * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f887acSAmir Vadai * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f887acSAmir Vadai * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f887acSAmir Vadai * SOFTWARE. 
31e8f887acSAmir Vadai */ 32e8f887acSAmir Vadai 33e3a2b7edSAmir Vadai #include <net/flow_dissector.h> 34e2394a61SVlad Buslov #include <net/flow_offload.h> 353f7d0eb4SOr Gerlitz #include <net/sch_generic.h> 36e3a2b7edSAmir Vadai #include <net/pkt_cls.h> 37e8f887acSAmir Vadai #include <linux/mlx5/fs.h> 38e8f887acSAmir Vadai #include <linux/mlx5/device.h> 39e8f887acSAmir Vadai #include <linux/rhashtable.h> 405a7e5bcbSVlad Buslov #include <linux/refcount.h> 41db76ca24SVlad Buslov #include <linux/completion.h> 42f6dfb4c3SHadar Hen Zion #include <net/arp.h> 433616d08bSDavid Ahern #include <net/ipv6_stubs.h> 44f828ca6aSEli Cohen #include <net/bareudp.h> 45d34eb2fcSOr Gerlitz #include <net/bonding.h> 46e8f887acSAmir Vadai #include "en.h" 47f0da4daaSChris Mi #include "en/tc/post_act.h" 481d447a39SSaeed Mahameed #include "en_rep.h" 49768c3667SVlad Buslov #include "en/rep/tc.h" 50e2394a61SVlad Buslov #include "en/rep/neigh.h" 51232c0013SHadar Hen Zion #include "en_tc.h" 5203a9d11eSOr Gerlitz #include "eswitch.h" 533f6d08d1SOr Gerlitz #include "fs_core.h" 542c81bfd5SHuy Nguyen #include "en/port.h" 55101f4de9SOz Shlomo #include "en/tc_tun.h" 560a7fcb78SPaul Blakey #include "en/mapping.h" 574c3844d9SPaul Blakey #include "en/tc_ct.h" 58b2fdf3d0SPaul Blakey #include "en/mod_hdr.h" 590d9f9647SVlad Buslov #include "en/tc_tun_encap.h" 600027d70cSChris Mi #include "en/tc/sample.h" 61fad54790SRoi Dayan #include "en/tc/act/act.h" 6206fe52a4SJianbo Liu #include "en/tc/post_meter.h" 6304de7ddaSRoi Dayan #include "lib/devcom.h" 649272e3dfSYevgeny Kliteynik #include "lib/geneve.h" 65ae430332SAriel Levkovich #include "lib/fs_chains.h" 667a978759SDmytro Linkin #include "diag/en_tc_tracepoint.h" 671fe3e316SParav Pandit #include <asm/div64.h> 68016c8946SJakub Kicinski #include "lag/lag.h" 69016c8946SJakub Kicinski #include "lag/mp.h" 70e8f887acSAmir Vadai 71acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4 726a064674SAriel Levkovich #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) 
/* Map each SW-defined register (chain, vport, tunnel id, CT state, ...) to
 * the HW metadata register it lives in: the modify-header field id (mfield),
 * the bit offset within that register (moffset), the bit length (mlen), and,
 * where the register is also matchable, the byte offset of the corresponding
 * match-param field (soffset).
 */
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};

/* To avoid false lock dependency warning set the tc_ht lock
 * class different than the lock class of the ht being used when deleting
 * last flow from a group and then deleting a group, we get into del_sw_flow_group()
 * which call rhashtable_destroy on fg->ftes_hash which will take ht->mutex but
 * it's different than the ht->mutex here.
 */
static struct lock_class_key tc_ht_lock_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);

/* Merge @val/@mask for register @type into the 32-bit match-param dword at
 * the register's soffset: clear the register's bit range first, then OR in
 * the new value shifted to moffset. Enables misc_parameters_2 matching.
 */
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	/* read the current big-endian dwords via memcpy (unaligned-safe) */
	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move to correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* zero val and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* add current to mask */
	curr_mask |= mask;
	curr_val |= val;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

/* Read back the value/mask previously written for register @type by
 * mlx5e_tc_match_to_reg_match() (inverse of the merge above).
 */
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

/* Append a set_action_in modify-header action that writes @data into the
 * register described by @type. Returns the index of the new action within
 * @mod_hdr_acts (>= 0) or a negative errno on allocation failure.
 */
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}

/* Internal-port private data lives on the uplink representor and exists only
 * in switchdev mode; returns NULL otherwise.
 */
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

/* Lazily create the uplink flow-meters object (FDB namespace) on first use in
 * switchdev mode; returns NULL in legacy mode or on init failure.
 */
struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}

/* Connection-tracking priv: uplink's in switchdev mode, NIC's otherwise. */
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

/* Sample offload priv exists only on the uplink in switchdev mode. */
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

/* Post-action table: uplink's in switchdev mode, NIC's otherwise. */
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return priv->fs.tc.post_act;
}

/* Insert a rule into the FDB (switchdev) or the NIC steering tables. */
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

/* Counterpart of mlx5_tc_rule_insert(). */
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

/* True when the attr carries an ASO flow-meter execute action. */
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
}

/* Acquire a flow meter for @attr and set up its post-meter table; on success
 * redirects the rule to the post-meter FT (adds FWD_DEST to attr->action).
 * On post-meter init failure the meter reference is released.
 */
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
					   meter->red_counter);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

/* Tear down what mlx5e_tc_add_flow_meter() set up. */
static void
mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}

/* Offload one rule, dispatching in priority order: CT flows, NIC (legacy)
 * mode, sampled flows, then plain eswitch rules (attaching a flow meter
 * first when requested).
 */
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
			&attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
					       spec, attr,
					       mod_hdr_acts);
	}

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

/* Undo mlx5e_tc_rule_offload(), mirroring its dispatch order; also releases
 * the flow meter if one was attached.
 */
void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
		return;
	}

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(attr);
}

/* Like mlx5e_tc_match_to_reg_set_and_get_id() but discards the action id:
 * returns 0 on success or a negative errno.
 */
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}

/* Rewrite, in place, the modify-header action previously appended for
 * register @type at index @act_id with a new @data value.
 */
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}

/* Per-hairpin-pair HW resources: transport domain, direct/indirect TIRs,
 * indirect RQT and TTC table on the function side.
 */
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

/* Take a reference on @flow unless it is NULL or already being freed. */
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

/* Drop a reference on @flow; the last put deletes the flow and frees it
 * after an RCU grace period.
 */
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

/* FDB namespace for eswitch flows, kernel (NIC) namespace otherwise. */
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

/* Pick the modify-header table matching the flow's namespace. */
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

/* Attach (or reuse) a modify-header context for the flow's parsed actions
 * and record the handle on the flow. Returns 0 or a negative errno.
 */
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

/* Release the flow's modify-header handle, if one was attached. */
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}

/* Resolve the mlx5 core device behind @ifindex in @net, or ERR_PTR(-ENODEV).
 * NOTE(review): assumes @ifindex always names an mlx5e netdev — netdev_priv()
 * is cast unconditionally; callers must guarantee this.
 */
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}

/* Allocate the hairpin's transport domain and build its direct (inline) TIR
 * on the first pair RQ. The builder is freed on all paths; the transport
 * domain is released if TIR creation fails.
 */
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

/* Counterpart of mlx5e_hairpin_create_transport(). */
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

/* Build the hairpin's indirect RQT, spreading the pair RQs uniformly with
 * the current RX hash function. The indirection table is heap-allocated
 * temporarily (it is large) and freed before returning.
 */
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
false); 71443ec0f41SMaxim Mikityanskiy mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false); 715bbeb53b8SAya Levin 716a6696735SMaxim Mikityanskiy err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false); 7173f6d08d1SOr Gerlitz if (err) { 7183f6d08d1SOr Gerlitz mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); 7193f6d08d1SOr Gerlitz goto err_destroy_tirs; 7203f6d08d1SOr Gerlitz } 721a6696735SMaxim Mikityanskiy 722a6696735SMaxim Mikityanskiy mlx5e_tir_builder_clear(builder); 7233f6d08d1SOr Gerlitz } 724a6696735SMaxim Mikityanskiy 725a6696735SMaxim Mikityanskiy out: 726a6696735SMaxim Mikityanskiy mlx5e_tir_builder_free(builder); 727a6696735SMaxim Mikityanskiy return err; 7283f6d08d1SOr Gerlitz 7293f6d08d1SOr Gerlitz err_destroy_tirs: 730a6696735SMaxim Mikityanskiy max_tt = tt; 731a6696735SMaxim Mikityanskiy for (tt = 0; tt < max_tt; tt++) 732a6696735SMaxim Mikityanskiy mlx5e_tir_destroy(&hp->indir_tir[tt]); 733a6696735SMaxim Mikityanskiy 734a6696735SMaxim Mikityanskiy goto out; 7353f6d08d1SOr Gerlitz } 7363f6d08d1SOr Gerlitz 7373f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) 7383f6d08d1SOr Gerlitz { 7393f6d08d1SOr Gerlitz int tt; 7403f6d08d1SOr Gerlitz 7413f6d08d1SOr Gerlitz for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 742a6696735SMaxim Mikityanskiy mlx5e_tir_destroy(&hp->indir_tir[tt]); 7433f6d08d1SOr Gerlitz } 7443f6d08d1SOr Gerlitz 7453f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, 7463f6d08d1SOr Gerlitz struct ttc_params *ttc_params) 7473f6d08d1SOr Gerlitz { 7483f6d08d1SOr Gerlitz struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 7493f6d08d1SOr Gerlitz int tt; 7503f6d08d1SOr Gerlitz 7513f6d08d1SOr Gerlitz memset(ttc_params, 0, sizeof(*ttc_params)); 7523f6d08d1SOr Gerlitz 753bc29764eSMaor Gottlieb ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, 754bc29764eSMaor Gottlieb MLX5_FLOW_NAMESPACE_KERNEL); 
755bc29764eSMaor Gottlieb for (tt = 0; tt < MLX5_NUM_TT; tt++) { 756bc29764eSMaor Gottlieb ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; 757bc29764eSMaor Gottlieb ttc_params->dests[tt].tir_num = 758bc29764eSMaor Gottlieb tt == MLX5_TT_ANY ? 759bc29764eSMaor Gottlieb mlx5e_tir_get_tirn(&hp->direct_tir) : 760bc29764eSMaor Gottlieb mlx5e_tir_get_tirn(&hp->indir_tir[tt]); 761bc29764eSMaor Gottlieb } 7623f6d08d1SOr Gerlitz 7633f6d08d1SOr Gerlitz ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; 7643f6d08d1SOr Gerlitz ft_attr->prio = MLX5E_TC_PRIO; 7653f6d08d1SOr Gerlitz } 7663f6d08d1SOr Gerlitz 7673f6d08d1SOr Gerlitz static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) 7683f6d08d1SOr Gerlitz { 7693f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 7703f6d08d1SOr Gerlitz struct ttc_params ttc_params; 7713f6d08d1SOr Gerlitz int err; 7723f6d08d1SOr Gerlitz 7733f6d08d1SOr Gerlitz err = mlx5e_hairpin_create_indirect_rqt(hp); 7743f6d08d1SOr Gerlitz if (err) 7753f6d08d1SOr Gerlitz return err; 7763f6d08d1SOr Gerlitz 7773f6d08d1SOr Gerlitz err = mlx5e_hairpin_create_indirect_tirs(hp); 7783f6d08d1SOr Gerlitz if (err) 7793f6d08d1SOr Gerlitz goto err_create_indirect_tirs; 7803f6d08d1SOr Gerlitz 7813f6d08d1SOr Gerlitz mlx5e_hairpin_set_ttc_params(hp, &ttc_params); 782f4b45940SMaor Gottlieb hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); 783f4b45940SMaor Gottlieb if (IS_ERR(hp->ttc)) { 784f4b45940SMaor Gottlieb err = PTR_ERR(hp->ttc); 7853f6d08d1SOr Gerlitz goto err_create_ttc_table; 786f4b45940SMaor Gottlieb } 7873f6d08d1SOr Gerlitz 7883f6d08d1SOr Gerlitz netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", 789f4b45940SMaor Gottlieb hp->num_channels, 790f4b45940SMaor Gottlieb mlx5_get_ttc_flow_table(priv->fs.ttc)->id); 7913f6d08d1SOr Gerlitz 7923f6d08d1SOr Gerlitz return 0; 7933f6d08d1SOr Gerlitz 7943f6d08d1SOr Gerlitz err_create_ttc_table: 7953f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_indirect_tirs(hp); 7963f6d08d1SOr 
Gerlitz err_create_indirect_tirs: 79706e9f13aSMaxim Mikityanskiy mlx5e_rqt_destroy(&hp->indir_rqt); 7983f6d08d1SOr Gerlitz 7993f6d08d1SOr Gerlitz return err; 8003f6d08d1SOr Gerlitz } 8013f6d08d1SOr Gerlitz 8023f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp) 8033f6d08d1SOr Gerlitz { 804f4b45940SMaor Gottlieb mlx5_destroy_ttc_table(hp->ttc); 8053f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_indirect_tirs(hp); 80606e9f13aSMaxim Mikityanskiy mlx5e_rqt_destroy(&hp->indir_rqt); 8073f6d08d1SOr Gerlitz } 8083f6d08d1SOr Gerlitz 80977ab67b7SOr Gerlitz static struct mlx5e_hairpin * 81077ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params, 81177ab67b7SOr Gerlitz int peer_ifindex) 81277ab67b7SOr Gerlitz { 81377ab67b7SOr Gerlitz struct mlx5_core_dev *func_mdev, *peer_mdev; 81477ab67b7SOr Gerlitz struct mlx5e_hairpin *hp; 81577ab67b7SOr Gerlitz struct mlx5_hairpin *pair; 81677ab67b7SOr Gerlitz int err; 81777ab67b7SOr Gerlitz 81877ab67b7SOr Gerlitz hp = kzalloc(sizeof(*hp), GFP_KERNEL); 81977ab67b7SOr Gerlitz if (!hp) 82077ab67b7SOr Gerlitz return ERR_PTR(-ENOMEM); 82177ab67b7SOr Gerlitz 82277ab67b7SOr Gerlitz func_mdev = priv->mdev; 82377ab67b7SOr Gerlitz peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); 824b1c2f631SDima Chumak if (IS_ERR(peer_mdev)) { 825b1c2f631SDima Chumak err = PTR_ERR(peer_mdev); 826b1c2f631SDima Chumak goto create_pair_err; 827b1c2f631SDima Chumak } 82877ab67b7SOr Gerlitz 82977ab67b7SOr Gerlitz pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params); 83077ab67b7SOr Gerlitz if (IS_ERR(pair)) { 83177ab67b7SOr Gerlitz err = PTR_ERR(pair); 83277ab67b7SOr Gerlitz goto create_pair_err; 83377ab67b7SOr Gerlitz } 83477ab67b7SOr Gerlitz hp->pair = pair; 83577ab67b7SOr Gerlitz hp->func_mdev = func_mdev; 8363f6d08d1SOr Gerlitz hp->func_priv = priv; 8373f6d08d1SOr Gerlitz hp->num_channels = params->num_channels; 83877ab67b7SOr Gerlitz 83977ab67b7SOr Gerlitz err = 
mlx5e_hairpin_create_transport(hp); 84077ab67b7SOr Gerlitz if (err) 84177ab67b7SOr Gerlitz goto create_transport_err; 84277ab67b7SOr Gerlitz 8433f6d08d1SOr Gerlitz if (hp->num_channels > 1) { 8443f6d08d1SOr Gerlitz err = mlx5e_hairpin_rss_init(hp); 8453f6d08d1SOr Gerlitz if (err) 8463f6d08d1SOr Gerlitz goto rss_init_err; 8473f6d08d1SOr Gerlitz } 8483f6d08d1SOr Gerlitz 84977ab67b7SOr Gerlitz return hp; 85077ab67b7SOr Gerlitz 8513f6d08d1SOr Gerlitz rss_init_err: 8523f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_transport(hp); 85377ab67b7SOr Gerlitz create_transport_err: 85477ab67b7SOr Gerlitz mlx5_core_hairpin_destroy(hp->pair); 85577ab67b7SOr Gerlitz create_pair_err: 85677ab67b7SOr Gerlitz kfree(hp); 85777ab67b7SOr Gerlitz return ERR_PTR(err); 85877ab67b7SOr Gerlitz } 85977ab67b7SOr Gerlitz 86077ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp) 86177ab67b7SOr Gerlitz { 8623f6d08d1SOr Gerlitz if (hp->num_channels > 1) 8633f6d08d1SOr Gerlitz mlx5e_hairpin_rss_cleanup(hp); 86477ab67b7SOr Gerlitz mlx5e_hairpin_destroy_transport(hp); 86577ab67b7SOr Gerlitz mlx5_core_hairpin_destroy(hp->pair); 86677ab67b7SOr Gerlitz kvfree(hp); 86777ab67b7SOr Gerlitz } 86877ab67b7SOr Gerlitz 869106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio) 870106be53bSOr Gerlitz { 871106be53bSOr Gerlitz return (peer_vhca_id << 16 | prio); 872106be53bSOr Gerlitz } 873106be53bSOr Gerlitz 8745c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, 875106be53bSOr Gerlitz u16 peer_vhca_id, u8 prio) 8765c65c564SOr Gerlitz { 8775c65c564SOr Gerlitz struct mlx5e_hairpin_entry *hpe; 878106be53bSOr Gerlitz u32 hash_key = hash_hairpin_info(peer_vhca_id, prio); 8795c65c564SOr Gerlitz 8805c65c564SOr Gerlitz hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, 881106be53bSOr Gerlitz hairpin_hlist, hash_key) { 882e4f9abbdSVlad Buslov if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) { 883e4f9abbdSVlad Buslov 
refcount_inc(&hpe->refcnt); 8845c65c564SOr Gerlitz return hpe; 8855c65c564SOr Gerlitz } 886e4f9abbdSVlad Buslov } 8875c65c564SOr Gerlitz 8885c65c564SOr Gerlitz return NULL; 8895c65c564SOr Gerlitz } 8905c65c564SOr Gerlitz 891e4f9abbdSVlad Buslov static void mlx5e_hairpin_put(struct mlx5e_priv *priv, 892e4f9abbdSVlad Buslov struct mlx5e_hairpin_entry *hpe) 893e4f9abbdSVlad Buslov { 894e4f9abbdSVlad Buslov /* no more hairpin flows for us, release the hairpin pair */ 895b32accdaSVlad Buslov if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock)) 896e4f9abbdSVlad Buslov return; 897b32accdaSVlad Buslov hash_del(&hpe->hairpin_hlist); 898b32accdaSVlad Buslov mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 899e4f9abbdSVlad Buslov 900db76ca24SVlad Buslov if (!IS_ERR_OR_NULL(hpe->hp)) { 901e4f9abbdSVlad Buslov netdev_dbg(priv->netdev, "del hairpin: peer %s\n", 902e4f9abbdSVlad Buslov dev_name(hpe->hp->pair->peer_mdev->device)); 903e4f9abbdSVlad Buslov 904e4f9abbdSVlad Buslov mlx5e_hairpin_destroy(hpe->hp); 905db76ca24SVlad Buslov } 906db76ca24SVlad Buslov 907db76ca24SVlad Buslov WARN_ON(!list_empty(&hpe->flows)); 908e4f9abbdSVlad Buslov kfree(hpe); 909e4f9abbdSVlad Buslov } 910e4f9abbdSVlad Buslov 911106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8 912106be53bSOr Gerlitz 913106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, 914e98bedf5SEli Britstein struct mlx5_flow_spec *spec, u8 *match_prio, 915e98bedf5SEli Britstein struct netlink_ext_ack *extack) 916106be53bSOr Gerlitz { 917106be53bSOr Gerlitz void *headers_c, *headers_v; 918106be53bSOr Gerlitz u8 prio_val, prio_mask = 0; 919106be53bSOr Gerlitz bool vlan_present; 920106be53bSOr Gerlitz 921106be53bSOr Gerlitz #ifdef CONFIG_MLX5_CORE_EN_DCB 922106be53bSOr Gerlitz if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) { 923e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 924e98bedf5SEli Britstein "only PCP trust state supported for hairpin"); 925106be53bSOr Gerlitz return 
-EOPNOTSUPP; 926106be53bSOr Gerlitz } 927106be53bSOr Gerlitz #endif 928106be53bSOr Gerlitz headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); 929106be53bSOr Gerlitz headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 930106be53bSOr Gerlitz 931106be53bSOr Gerlitz vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag); 932106be53bSOr Gerlitz if (vlan_present) { 933106be53bSOr Gerlitz prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); 934106be53bSOr Gerlitz prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); 935106be53bSOr Gerlitz } 936106be53bSOr Gerlitz 937106be53bSOr Gerlitz if (!vlan_present || !prio_mask) { 938106be53bSOr Gerlitz prio_val = UNKNOWN_MATCH_PRIO; 939106be53bSOr Gerlitz } else if (prio_mask != 0x7) { 940e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 941e98bedf5SEli Britstein "masked priority match not supported for hairpin"); 942106be53bSOr Gerlitz return -EOPNOTSUPP; 943106be53bSOr Gerlitz } 944106be53bSOr Gerlitz 945106be53bSOr Gerlitz *match_prio = prio_val; 946106be53bSOr Gerlitz return 0; 947106be53bSOr Gerlitz } 948106be53bSOr Gerlitz 9495c65c564SOr Gerlitz static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, 9505c65c564SOr Gerlitz struct mlx5e_tc_flow *flow, 951e98bedf5SEli Britstein struct mlx5e_tc_flow_parse_attr *parse_attr, 952e98bedf5SEli Britstein struct netlink_ext_ack *extack) 9535c65c564SOr Gerlitz { 95498b66cb1SEli Britstein int peer_ifindex = parse_attr->mirred_ifindex[0]; 9555c65c564SOr Gerlitz struct mlx5_hairpin_params params; 956d8822868SOr Gerlitz struct mlx5_core_dev *peer_mdev; 9575c65c564SOr Gerlitz struct mlx5e_hairpin_entry *hpe; 9585c65c564SOr Gerlitz struct mlx5e_hairpin *hp; 9593f6d08d1SOr Gerlitz u64 link_speed64; 9603f6d08d1SOr Gerlitz u32 link_speed; 961106be53bSOr Gerlitz u8 match_prio; 962d8822868SOr Gerlitz u16 peer_id; 9635c65c564SOr Gerlitz int err; 9645c65c564SOr Gerlitz 965d8822868SOr Gerlitz peer_mdev 
= mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); 966b1c2f631SDima Chumak if (IS_ERR(peer_mdev)) { 967b1c2f631SDima Chumak NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device"); 968b1c2f631SDima Chumak return PTR_ERR(peer_mdev); 969b1c2f631SDima Chumak } 970b1c2f631SDima Chumak 971d8822868SOr Gerlitz if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { 972e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); 9735c65c564SOr Gerlitz return -EOPNOTSUPP; 9745c65c564SOr Gerlitz } 9755c65c564SOr Gerlitz 976d8822868SOr Gerlitz peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id); 977e98bedf5SEli Britstein err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio, 978e98bedf5SEli Britstein extack); 979106be53bSOr Gerlitz if (err) 980106be53bSOr Gerlitz return err; 981b32accdaSVlad Buslov 982b32accdaSVlad Buslov mutex_lock(&priv->fs.tc.hairpin_tbl_lock); 983106be53bSOr Gerlitz hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); 984db76ca24SVlad Buslov if (hpe) { 985db76ca24SVlad Buslov mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 986db76ca24SVlad Buslov wait_for_completion(&hpe->res_ready); 987db76ca24SVlad Buslov 988db76ca24SVlad Buslov if (IS_ERR(hpe->hp)) { 989db76ca24SVlad Buslov err = -EREMOTEIO; 990db76ca24SVlad Buslov goto out_err; 991db76ca24SVlad Buslov } 9925c65c564SOr Gerlitz goto attach_flow; 993db76ca24SVlad Buslov } 9945c65c564SOr Gerlitz 9955c65c564SOr Gerlitz hpe = kzalloc(sizeof(*hpe), GFP_KERNEL); 996b32accdaSVlad Buslov if (!hpe) { 997db76ca24SVlad Buslov mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 998db76ca24SVlad Buslov return -ENOMEM; 999b32accdaSVlad Buslov } 10005c65c564SOr Gerlitz 100173edca73SVlad Buslov spin_lock_init(&hpe->flows_lock); 10025c65c564SOr Gerlitz INIT_LIST_HEAD(&hpe->flows); 1003db76ca24SVlad Buslov INIT_LIST_HEAD(&hpe->dead_peer_wait_list); 1004d8822868SOr Gerlitz hpe->peer_vhca_id = peer_id; 1005106be53bSOr Gerlitz hpe->prio = match_prio; 1006e4f9abbdSVlad 
Buslov refcount_set(&hpe->refcnt, 1); 1007db76ca24SVlad Buslov init_completion(&hpe->res_ready); 1008db76ca24SVlad Buslov 1009db76ca24SVlad Buslov hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, 1010db76ca24SVlad Buslov hash_hairpin_info(peer_id, match_prio)); 1011db76ca24SVlad Buslov mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 10125c65c564SOr Gerlitz 10136cdc686aSAriel Levkovich params.log_data_size = 16; 10145c65c564SOr Gerlitz params.log_data_size = min_t(u8, params.log_data_size, 10155c65c564SOr Gerlitz MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); 10165c65c564SOr Gerlitz params.log_data_size = max_t(u8, params.log_data_size, 10175c65c564SOr Gerlitz MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz)); 10185c65c564SOr Gerlitz 1019eb9180f7SOr Gerlitz params.log_num_packets = params.log_data_size - 1020eb9180f7SOr Gerlitz MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev); 1021eb9180f7SOr Gerlitz params.log_num_packets = min_t(u8, params.log_num_packets, 1022eb9180f7SOr Gerlitz MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets)); 1023eb9180f7SOr Gerlitz 1024eb9180f7SOr Gerlitz params.q_counter = priv->q_counter; 10253f6d08d1SOr Gerlitz /* set hairpin pair per each 50Gbs share of the link */ 10262c81bfd5SHuy Nguyen mlx5e_port_max_linkspeed(priv->mdev, &link_speed); 10273f6d08d1SOr Gerlitz link_speed = max_t(u32, link_speed, 50000); 10283f6d08d1SOr Gerlitz link_speed64 = link_speed; 10293f6d08d1SOr Gerlitz do_div(link_speed64, 50000); 10303f6d08d1SOr Gerlitz params.num_channels = link_speed64; 10313f6d08d1SOr Gerlitz 10325c65c564SOr Gerlitz hp = mlx5e_hairpin_create(priv, ¶ms, peer_ifindex); 1033db76ca24SVlad Buslov hpe->hp = hp; 1034db76ca24SVlad Buslov complete_all(&hpe->res_ready); 10355c65c564SOr Gerlitz if (IS_ERR(hp)) { 10365c65c564SOr Gerlitz err = PTR_ERR(hp); 1037db76ca24SVlad Buslov goto out_err; 10385c65c564SOr Gerlitz } 10395c65c564SOr Gerlitz 1040eb9180f7SOr Gerlitz netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d 
(log) data %d packets %d\n", 1041a6696735SMaxim Mikityanskiy mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0], 104227b942fbSParav Pandit dev_name(hp->pair->peer_mdev->device), 1043eb9180f7SOr Gerlitz hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); 10445c65c564SOr Gerlitz 10455c65c564SOr Gerlitz attach_flow: 10463f6d08d1SOr Gerlitz if (hpe->hp->num_channels > 1) { 1047226f2ca3SVlad Buslov flow_flag_set(flow, HAIRPIN_RSS); 1048f4b45940SMaor Gottlieb flow->attr->nic_attr->hairpin_ft = 1049f4b45940SMaor Gottlieb mlx5_get_ttc_flow_table(hpe->hp->ttc); 10503f6d08d1SOr Gerlitz } else { 1051a6696735SMaxim Mikityanskiy flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir); 10523f6d08d1SOr Gerlitz } 1053b32accdaSVlad Buslov 1054e4f9abbdSVlad Buslov flow->hpe = hpe; 105573edca73SVlad Buslov spin_lock(&hpe->flows_lock); 10565c65c564SOr Gerlitz list_add(&flow->hairpin, &hpe->flows); 105773edca73SVlad Buslov spin_unlock(&hpe->flows_lock); 10583f6d08d1SOr Gerlitz 10595c65c564SOr Gerlitz return 0; 10605c65c564SOr Gerlitz 1061db76ca24SVlad Buslov out_err: 1062db76ca24SVlad Buslov mlx5e_hairpin_put(priv, hpe); 10635c65c564SOr Gerlitz return err; 10645c65c564SOr Gerlitz } 10655c65c564SOr Gerlitz 10665c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, 10675c65c564SOr Gerlitz struct mlx5e_tc_flow *flow) 10685c65c564SOr Gerlitz { 10695a7e5bcbSVlad Buslov /* flow wasn't fully initialized */ 1070e4f9abbdSVlad Buslov if (!flow->hpe) 10715a7e5bcbSVlad Buslov return; 10725a7e5bcbSVlad Buslov 107373edca73SVlad Buslov spin_lock(&flow->hpe->flows_lock); 10745c65c564SOr Gerlitz list_del(&flow->hairpin); 107573edca73SVlad Buslov spin_unlock(&flow->hpe->flows_lock); 107673edca73SVlad Buslov 1077e4f9abbdSVlad Buslov mlx5e_hairpin_put(priv, flow->hpe); 1078e4f9abbdSVlad Buslov flow->hpe = NULL; 10795c65c564SOr Gerlitz } 10805c65c564SOr Gerlitz 108108247066SAriel Levkovich struct mlx5_flow_handle * 
108208247066SAriel Levkovich mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, 108308247066SAriel Levkovich struct mlx5_flow_spec *spec, 1084c620b772SAriel Levkovich struct mlx5_flow_attr *attr) 1085e8f887acSAmir Vadai { 108608247066SAriel Levkovich struct mlx5_flow_context *flow_context = &spec->flow_context; 108767d62ee7SRoi Dayan struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); 1088c620b772SAriel Levkovich struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr; 10896a064674SAriel Levkovich struct mlx5e_tc_table *tc = &priv->fs.tc; 10905c65c564SOr Gerlitz struct mlx5_flow_destination dest[2] = {}; 109166958ed9SHadar Hen Zion struct mlx5_flow_act flow_act = { 10923bc4b7bfSOr Gerlitz .action = attr->action, 1093bb0ee7dcSJianbo Liu .flags = FLOW_ACT_NO_APPEND, 109466958ed9SHadar Hen Zion }; 109508247066SAriel Levkovich struct mlx5_flow_handle *rule; 1096c7569097SAriel Levkovich struct mlx5_flow_table *ft; 109708247066SAriel Levkovich int dest_ix = 0; 1098e8f887acSAmir Vadai 1099bb0ee7dcSJianbo Liu flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 1100c620b772SAriel Levkovich flow_context->flow_tag = nic_attr->flow_tag; 1101bb0ee7dcSJianbo Liu 1102aedd133dSAriel Levkovich if (attr->dest_ft) { 1103aedd133dSAriel Levkovich dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1104aedd133dSAriel Levkovich dest[dest_ix].ft = attr->dest_ft; 1105aedd133dSAriel Levkovich dest_ix++; 1106aedd133dSAriel Levkovich } else if (nic_attr->hairpin_ft) { 11073f6d08d1SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1108c620b772SAriel Levkovich dest[dest_ix].ft = nic_attr->hairpin_ft; 110908247066SAriel Levkovich dest_ix++; 1110c620b772SAriel Levkovich } else if (nic_attr->hairpin_tirn) { 11115c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1112c620b772SAriel Levkovich dest[dest_ix].tir_num = nic_attr->hairpin_tirn; 11133f6d08d1SOr Gerlitz dest_ix++; 11143f6d08d1SOr Gerlitz } else if (attr->action & 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 11155c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1116c7569097SAriel Levkovich if (attr->dest_chain) { 1117c7569097SAriel Levkovich dest[dest_ix].ft = mlx5_chains_get_table(nic_chains, 1118c7569097SAriel Levkovich attr->dest_chain, 1, 1119c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 1120c7569097SAriel Levkovich if (IS_ERR(dest[dest_ix].ft)) 1121c7569097SAriel Levkovich return ERR_CAST(dest[dest_ix].ft); 1122c7569097SAriel Levkovich } else { 11236783f0a2SVu Pham dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan); 1124c7569097SAriel Levkovich } 11255c65c564SOr Gerlitz dest_ix++; 11265c65c564SOr Gerlitz } 1127aad7e08dSAmir Vadai 1128c7569097SAriel Levkovich if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && 1129c7569097SAriel Levkovich MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) 1130c7569097SAriel Levkovich flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 1131c7569097SAriel Levkovich 113208247066SAriel Levkovich if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 11335c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 113408247066SAriel Levkovich dest[dest_ix].counter_id = mlx5_fc_id(attr->counter); 11355c65c564SOr Gerlitz dest_ix++; 1136aad7e08dSAmir Vadai } 1137aad7e08dSAmir Vadai 113808247066SAriel Levkovich if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 11392b688ea5SMaor Gottlieb flow_act.modify_hdr = attr->modify_hdr; 11402f4fe4caSOr Gerlitz 11416a064674SAriel Levkovich mutex_lock(&tc->t_lock); 11426a064674SAriel Levkovich if (IS_ERR_OR_NULL(tc->t)) { 11436a064674SAriel Levkovich /* Create the root table here if doesn't exist yet */ 11446a064674SAriel Levkovich tc->t = 1145c7569097SAriel Levkovich mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL); 114621b9c144SOr Gerlitz 11476a064674SAriel Levkovich if (IS_ERR(tc->t)) { 11486a064674SAriel Levkovich mutex_unlock(&tc->t_lock); 1149e8f887acSAmir Vadai 
netdev_err(priv->netdev, 1150e8f887acSAmir Vadai "Failed to create tc offload table\n"); 1151c7569097SAriel Levkovich rule = ERR_CAST(priv->fs.tc.t); 1152c7569097SAriel Levkovich goto err_ft_get; 1153e8f887acSAmir Vadai } 1154e8f887acSAmir Vadai } 115508247066SAriel Levkovich mutex_unlock(&tc->t_lock); 1156e8f887acSAmir Vadai 1157aedd133dSAriel Levkovich if (attr->chain || attr->prio) 1158c7569097SAriel Levkovich ft = mlx5_chains_get_table(nic_chains, 1159c7569097SAriel Levkovich attr->chain, attr->prio, 1160c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 1161aedd133dSAriel Levkovich else 1162aedd133dSAriel Levkovich ft = attr->ft; 1163aedd133dSAriel Levkovich 1164c7569097SAriel Levkovich if (IS_ERR(ft)) { 1165c7569097SAriel Levkovich rule = ERR_CAST(ft); 1166c7569097SAriel Levkovich goto err_ft_get; 1167c7569097SAriel Levkovich } 1168c7569097SAriel Levkovich 1169c620b772SAriel Levkovich if (attr->outer_match_level != MLX5_MATCH_NONE) 117008247066SAriel Levkovich spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 117138aa51c1SOr Gerlitz 1172c7569097SAriel Levkovich rule = mlx5_add_flow_rules(ft, spec, 11735c65c564SOr Gerlitz &flow_act, dest, dest_ix); 117408247066SAriel Levkovich if (IS_ERR(rule)) 1175c7569097SAriel Levkovich goto err_rule; 117608247066SAriel Levkovich 117708247066SAriel Levkovich return rule; 1178c7569097SAriel Levkovich 1179c7569097SAriel Levkovich err_rule: 1180aedd133dSAriel Levkovich if (attr->chain || attr->prio) 1181c7569097SAriel Levkovich mlx5_chains_put_table(nic_chains, 1182c7569097SAriel Levkovich attr->chain, attr->prio, 1183c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 1184c7569097SAriel Levkovich err_ft_get: 1185c7569097SAriel Levkovich if (attr->dest_chain) 1186c7569097SAriel Levkovich mlx5_chains_put_table(nic_chains, 1187c7569097SAriel Levkovich attr->dest_chain, 1, 1188c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 1189c7569097SAriel Levkovich 1190c7569097SAriel Levkovich return ERR_CAST(rule); 119108247066SAriel Levkovich } 
119208247066SAriel Levkovich 119308247066SAriel Levkovich static int 1194df67ad62SRoi Dayan alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev, 1195df67ad62SRoi Dayan struct mlx5_flow_attr *attr) 1196df67ad62SRoi Dayan 1197df67ad62SRoi Dayan { 1198df67ad62SRoi Dayan struct mlx5_fc *counter; 1199df67ad62SRoi Dayan 1200df67ad62SRoi Dayan counter = mlx5_fc_create(counter_dev, true); 1201df67ad62SRoi Dayan if (IS_ERR(counter)) 1202df67ad62SRoi Dayan return PTR_ERR(counter); 1203df67ad62SRoi Dayan 1204df67ad62SRoi Dayan attr->counter = counter; 1205df67ad62SRoi Dayan return 0; 1206df67ad62SRoi Dayan } 1207df67ad62SRoi Dayan 1208df67ad62SRoi Dayan static int 120908247066SAriel Levkovich mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 121008247066SAriel Levkovich struct mlx5e_tc_flow *flow, 121108247066SAriel Levkovich struct netlink_ext_ack *extack) 121208247066SAriel Levkovich { 1213c6cfe113SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 1214c620b772SAriel Levkovich struct mlx5_flow_attr *attr = flow->attr; 121508247066SAriel Levkovich struct mlx5_core_dev *dev = priv->mdev; 121608247066SAriel Levkovich int err; 121708247066SAriel Levkovich 1218c6cfe113SRoi Dayan parse_attr = attr->parse_attr; 1219c6cfe113SRoi Dayan 122008247066SAriel Levkovich if (flow_flag_test(flow, HAIRPIN)) { 122108247066SAriel Levkovich err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); 122208247066SAriel Levkovich if (err) 122308247066SAriel Levkovich return err; 122408247066SAriel Levkovich } 122508247066SAriel Levkovich 122608247066SAriel Levkovich if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 1227df67ad62SRoi Dayan err = alloc_flow_attr_counter(dev, attr); 1228df67ad62SRoi Dayan if (err) 1229df67ad62SRoi Dayan return err; 123008247066SAriel Levkovich } 123108247066SAriel Levkovich 123208247066SAriel Levkovich if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 123308247066SAriel Levkovich err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 12342c0e5cf5SPaul 
Blakey mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 123508247066SAriel Levkovich if (err) 123608247066SAriel Levkovich return err; 123708247066SAriel Levkovich } 123808247066SAriel Levkovich 123984ba8062SRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_CT) 1240a572c0a7SRoi Dayan flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec, 1241aedd133dSAriel Levkovich attr, &parse_attr->mod_hdr_acts); 1242aedd133dSAriel Levkovich else 124308247066SAriel Levkovich flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, 124408247066SAriel Levkovich attr); 1245e8f887acSAmir Vadai 1246a2b7189bSzhong jiang return PTR_ERR_OR_ZERO(flow->rule[0]); 1247e8f887acSAmir Vadai } 1248e8f887acSAmir Vadai 124908247066SAriel Levkovich void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, 1250c7569097SAriel Levkovich struct mlx5_flow_handle *rule, 1251c7569097SAriel Levkovich struct mlx5_flow_attr *attr) 125208247066SAriel Levkovich { 125367d62ee7SRoi Dayan struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); 1254c7569097SAriel Levkovich 125508247066SAriel Levkovich mlx5_del_flow_rules(rule); 1256c7569097SAriel Levkovich 1257aedd133dSAriel Levkovich if (attr->chain || attr->prio) 1258c7569097SAriel Levkovich mlx5_chains_put_table(nic_chains, attr->chain, attr->prio, 1259c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 1260c7569097SAriel Levkovich 1261c7569097SAriel Levkovich if (attr->dest_chain) 1262c7569097SAriel Levkovich mlx5_chains_put_table(nic_chains, attr->dest_chain, 1, 1263c7569097SAriel Levkovich MLX5E_TC_FT_LEVEL); 126408247066SAriel Levkovich } 126508247066SAriel Levkovich 1266d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, 1267d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow) 1268d85cdccbSOr Gerlitz { 1269c620b772SAriel Levkovich struct mlx5_flow_attr *attr = flow->attr; 12706a064674SAriel Levkovich struct mlx5e_tc_table *tc = &priv->fs.tc; 1271d85cdccbSOr Gerlitz 1272c7569097SAriel Levkovich 
flow_flag_clear(flow, OFFLOADED); 1273c7569097SAriel Levkovich 127484ba8062SRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_CT) 1275a572c0a7SRoi Dayan mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr); 1276aedd133dSAriel Levkovich else if (!IS_ERR_OR_NULL(flow->rule[0])) 1277aedd133dSAriel Levkovich mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr); 1278aedd133dSAriel Levkovich 1279c7569097SAriel Levkovich /* Remove root table if no rules are left to avoid 1280c7569097SAriel Levkovich * extra steering hops. 1281c7569097SAriel Levkovich */ 1282b6fac0b4SVlad Buslov mutex_lock(&priv->fs.tc.t_lock); 12836a064674SAriel Levkovich if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && 12846a064674SAriel Levkovich !IS_ERR_OR_NULL(tc->t)) { 128567d62ee7SRoi Dayan mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); 1286d85cdccbSOr Gerlitz priv->fs.tc.t = NULL; 1287d85cdccbSOr Gerlitz } 1288b6fac0b4SVlad Buslov mutex_unlock(&priv->fs.tc.t_lock); 12892f4fe4caSOr Gerlitz 1290513f8f7fSOr Gerlitz if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 12913099eb5aSOr Gerlitz mlx5e_detach_mod_hdr(priv, flow); 12925c65c564SOr Gerlitz 1293972fe492SRoi Dayan if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 1294aedd133dSAriel Levkovich mlx5_fc_destroy(priv->mdev, attr->counter); 1295aedd133dSAriel Levkovich 1296226f2ca3SVlad Buslov if (flow_flag_test(flow, HAIRPIN)) 12975c65c564SOr Gerlitz mlx5e_hairpin_flow_del(priv, flow); 1298c620b772SAriel Levkovich 12998300f225SRoi Dayan free_flow_post_acts(flow); 13008300f225SRoi Dayan 130188d97486SRoi Dayan kvfree(attr->parse_attr); 1302c620b772SAriel Levkovich kfree(flow->attr); 1303d85cdccbSOr Gerlitz } 1304d85cdccbSOr Gerlitz 13050d9f9647SVlad Buslov struct mlx5_flow_handle * 13066d2a3ed0SOr Gerlitz mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, 13076d2a3ed0SOr Gerlitz struct mlx5e_tc_flow *flow, 13086d2a3ed0SOr Gerlitz struct mlx5_flow_spec *spec, 1309c620b772SAriel Levkovich struct mlx5_flow_attr *attr) 
13106d2a3ed0SOr Gerlitz { 13116d2a3ed0SOr Gerlitz struct mlx5_flow_handle *rule; 13124c3844d9SPaul Blakey 1313e5d4e1daSRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) 131489e39467SPaul Blakey return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); 131589e39467SPaul Blakey 1316a572c0a7SRoi Dayan rule = mlx5e_tc_rule_offload(flow->priv, spec, attr); 13176d2a3ed0SOr Gerlitz 13186d2a3ed0SOr Gerlitz if (IS_ERR(rule)) 13196d2a3ed0SOr Gerlitz return rule; 13206d2a3ed0SOr Gerlitz 1321c620b772SAriel Levkovich if (attr->esw_attr->split_count) { 13226d2a3ed0SOr Gerlitz flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr); 132384ba8062SRoi Dayan if (IS_ERR(flow->rule[1])) 132484ba8062SRoi Dayan goto err_rule1; 13256d2a3ed0SOr Gerlitz } 13266d2a3ed0SOr Gerlitz 13276d2a3ed0SOr Gerlitz return rule; 132884ba8062SRoi Dayan 132984ba8062SRoi Dayan err_rule1: 1330a572c0a7SRoi Dayan mlx5e_tc_rule_unoffload(flow->priv, rule, attr); 133184ba8062SRoi Dayan return flow->rule[1]; 13326d2a3ed0SOr Gerlitz } 13336d2a3ed0SOr Gerlitz 13340d9f9647SVlad Buslov void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, 13356d2a3ed0SOr Gerlitz struct mlx5e_tc_flow *flow, 1336c620b772SAriel Levkovich struct mlx5_flow_attr *attr) 13376d2a3ed0SOr Gerlitz { 1338226f2ca3SVlad Buslov flow_flag_clear(flow, OFFLOADED); 13396d2a3ed0SOr Gerlitz 1340e5d4e1daSRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) 134184ba8062SRoi Dayan return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); 134289e39467SPaul Blakey 1343c620b772SAriel Levkovich if (attr->esw_attr->split_count) 13446d2a3ed0SOr Gerlitz mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); 13456d2a3ed0SOr Gerlitz 1346a572c0a7SRoi Dayan mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr); 13476d2a3ed0SOr Gerlitz } 13486d2a3ed0SOr Gerlitz 13490d9f9647SVlad Buslov struct mlx5_flow_handle * 13505dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, 13515dbe906fSPaul Blakey struct mlx5e_tc_flow *flow, 
1352178f69b4SEli Cohen struct mlx5_flow_spec *spec) 13535dbe906fSPaul Blakey { 1354c620b772SAriel Levkovich struct mlx5_flow_attr *slow_attr; 13555dbe906fSPaul Blakey struct mlx5_flow_handle *rule; 13565dbe906fSPaul Blakey 1357c620b772SAriel Levkovich slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); 1358c620b772SAriel Levkovich if (!slow_attr) 1359c620b772SAriel Levkovich return ERR_PTR(-ENOMEM); 13605dbe906fSPaul Blakey 1361c620b772SAriel Levkovich memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ); 1362c620b772SAriel Levkovich slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1363c620b772SAriel Levkovich slow_attr->esw_attr->split_count = 0; 1364e5d4e1daSRoi Dayan slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; 1365c620b772SAriel Levkovich 1366c620b772SAriel Levkovich rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); 13675dbe906fSPaul Blakey if (!IS_ERR(rule)) 1368226f2ca3SVlad Buslov flow_flag_set(flow, SLOW); 13695dbe906fSPaul Blakey 1370c620b772SAriel Levkovich kfree(slow_attr); 1371c620b772SAriel Levkovich 13725dbe906fSPaul Blakey return rule; 13735dbe906fSPaul Blakey } 13745dbe906fSPaul Blakey 13750d9f9647SVlad Buslov void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, 1376178f69b4SEli Cohen struct mlx5e_tc_flow *flow) 13775dbe906fSPaul Blakey { 1378c620b772SAriel Levkovich struct mlx5_flow_attr *slow_attr; 1379178f69b4SEli Cohen 1380c620b772SAriel Levkovich slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); 13815efbe617SAriel Levkovich if (!slow_attr) { 13825efbe617SAriel Levkovich mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n"); 13835efbe617SAriel Levkovich return; 13845efbe617SAriel Levkovich } 1385c620b772SAriel Levkovich 1386c620b772SAriel Levkovich memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ); 1387c620b772SAriel Levkovich slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1388c620b772SAriel Levkovich slow_attr->esw_attr->split_count = 0; 1389e5d4e1daSRoi Dayan 
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; 1390c620b772SAriel Levkovich mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); 1391226f2ca3SVlad Buslov flow_flag_clear(flow, SLOW); 1392c620b772SAriel Levkovich kfree(slow_attr); 13935dbe906fSPaul Blakey } 13945dbe906fSPaul Blakey 1395ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this 1396ad86755bSVlad Buslov * function. 1397ad86755bSVlad Buslov */ 1398ad86755bSVlad Buslov static void unready_flow_add(struct mlx5e_tc_flow *flow, 1399ad86755bSVlad Buslov struct list_head *unready_flows) 1400ad86755bSVlad Buslov { 1401ad86755bSVlad Buslov flow_flag_set(flow, NOT_READY); 1402ad86755bSVlad Buslov list_add_tail(&flow->unready, unready_flows); 1403ad86755bSVlad Buslov } 1404ad86755bSVlad Buslov 1405ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this 1406ad86755bSVlad Buslov * function. 1407ad86755bSVlad Buslov */ 1408ad86755bSVlad Buslov static void unready_flow_del(struct mlx5e_tc_flow *flow) 1409ad86755bSVlad Buslov { 1410ad86755bSVlad Buslov list_del(&flow->unready); 1411ad86755bSVlad Buslov flow_flag_clear(flow, NOT_READY); 1412ad86755bSVlad Buslov } 1413ad86755bSVlad Buslov 1414b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow) 1415b4a23329SRoi Dayan { 1416b4a23329SRoi Dayan struct mlx5_rep_uplink_priv *uplink_priv; 1417b4a23329SRoi Dayan struct mlx5e_rep_priv *rpriv; 1418b4a23329SRoi Dayan struct mlx5_eswitch *esw; 1419b4a23329SRoi Dayan 1420b4a23329SRoi Dayan esw = flow->priv->mdev->priv.eswitch; 1421b4a23329SRoi Dayan rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 1422b4a23329SRoi Dayan uplink_priv = &rpriv->uplink_priv; 1423b4a23329SRoi Dayan 1424ad86755bSVlad Buslov mutex_lock(&uplink_priv->unready_flows_lock); 1425ad86755bSVlad Buslov unready_flow_add(flow, &uplink_priv->unready_flows); 1426ad86755bSVlad Buslov mutex_unlock(&uplink_priv->unready_flows_lock); 1427b4a23329SRoi Dayan } 
/* Counterpart of add_unready_flow(): unlink @flow from the uplink
 * representor's unready-flows list and clear NOT_READY, under
 * unready_flows_lock.
 */
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

/* Return true when @out_dev belongs to a PF and @route_dev to a VF on the
 * same HW device — i.e. the route device is a VF tunnel endpoint behind
 * this PF.
 */
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

/* Resolve the eswitch vport number of @route_dev (looked up by its
 * vhca_id) into *@vport. Under LAG the two devices may sit on different
 * eswitch instances, so on -ENOENT the lookup is retried on the devcom
 * peer eswitch (released before returning). Returns 0 or a negative
 * errno.
 */
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In lag case we may get devices from different eswitch instances.
		 * If we failed to get vport num, it means, mostly, that we on the wrong
		 * eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

/* Allocate a HW modify-header object from the actions accumulated in
 * attr->parse_attr->mod_hdr_acts and attach it to @attr->modify_hdr.
 * WARNs if a modify_hdr was already attached. Returns 0 or a negative
 * errno from the allocation.
 */
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mod_hdr;

	return 0;
}

/* Attach encap contexts for every ENCAP destination of an eswitch flow.
 *
 * For each dest flagged MLX5_ESW_DEST_ENCAP, looks up the mirred device,
 * attaches the encap entry and records the resulting rep/mdev on the
 * dest. *@encap_valid is reported by mlx5e_attach_encap() (neighbour
 * validity); *@vf_tun is set when any dest needs the VF-tunnel
 * (src-port-change) path and no dest internal port is used. VF tunnel
 * combined with mirroring (out_count > 1) is rejected with -EOPNOTSUPP.
 * No-op (returns 0) for non-eswitch flows.
 */
static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *encap_valid,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;
	*encap_valid = true;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev, encap_valid);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

/* Teardown counterpart of set_encap_dests(): detach every ENCAP dest and
 * free its saved tun_info. Recomputes *@vf_tun the same way so the
 * caller can pick the matching mod-hdr release path. No-op for
 * non-eswitch flows.
 */
static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool *vf_tun)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}

/* Offload a TC flow to the FDB (eswitch).
 *
 * Validates chain/prio against the chains capabilities, attaches decap
 * route / L3-to-L2 decap / OVS internal port contexts as required by the
 * flow flags, resolves encap destinations, adds VLAN and mod-hdr
 * actions and a counter, then installs either the real FDB rule or a
 * slow-path rule when the encap neighbour is not valid yet.
 *
 * On any failure the flow is marked FAILED and the error returned;
 * acquired sub-resources are released later by mlx5e_tc_del_fdb_flow()
 * (NOTE(review): this relies on the teardown path being safe to run on a
 * partially-initialized flow — confirm against the full file).
 */
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun, encap_valid;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
	if (err)
		goto err_out;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		/* VF tunnel flows allocate the modify header directly; others
		 * go through the shared attach path.
		 */
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
		if (err)
			goto err_out;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid || flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

/* Return true when the flow's match spec carries geneve TLV option data
 * (misc_parameters_3.geneve_tlv_option_0_data is non-zero).
 */
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

/* Full teardown of an FDB-offloaded flow: reverse of
 * mlx5e_tc_add_fdb_flow(). Releases the tunnel-id mapping, removes the
 * HW rule(s) (slow path or regular), signals del_hw_done, then detaches
 * geneve option, VLAN action, decap route, encap dests, CT match,
 * mod-hdr, counter, internal ports, L3-to-L2 decap, post-acts and the
 * MPESW lag rule before freeing the attr structures.
 */
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	clean_encap_dests(priv, flow, attr, &vf_tun);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		/* Mirrors the add path: vf_tun flows own the modify header
		 * directly, others detach via the shared path.
		 */
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	free_flow_post_acts(flow);

	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

/* Return the flow counter of the first attr on the flow's attrs list. */
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

/* Unlink and release the peer (duplicated) flow of an eswitch flow.
 * No-op unless the flow is both ESWITCH and DUP. Removes the flow from
 * the eswitch peer list under peer_mutex, clears DUP, and frees the peer
 * flow when its refcount drops to zero.
 */
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

/* Like __mlx5e_tc_del_fdb_peer_flow() but first takes (and afterwards
 * releases) the devcom peer-eswitch reference; returns silently when no
 * peer eswitch is available.
 */
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

/* Dispatch flow teardown: eswitch flows also drop their devcom peer
 * duplicate; NIC flows take the NIC teardown path.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

/* A chain-0 rule needs the tunnel-mapping (restore) machinery when it
 * contains a goto or sample action; rules on chain > 0 never do.
 */
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

/* Walk the geneve options in an enc-opts mask and classify it:
 * *@dont_care stays true while every option byte is zero (wildcard).
 * Any non-zero option must be a FULL match (class == 0xffff and
 * type == 0xff); a partial option mask is rejected with -EOPNOTSUPP
 * since partial tunnel-option match in chain > 0 is unsupported.
 */
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

/* Copy one dissector key from @rule into @dst, sized by *dst. */
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

/* Allocate a restore tunnel id for @flow.
 *
 * Builds a tunnel_match_key from the rule's enc-* dissector keys (plus
 * the filter netdev ifindex) and maps it to @tun_id; non-wildcard geneve
 * enc-opts get their own @enc_opts_id mapping. The combined value
 * (tun_id << ENC_OPTS_BITS | enc_opts_id) is either matched against
 * TUNNEL_TO_REG (chain > 0) or written to it via a mod-hdr action
 * (chain 0), and stored in attr->tunnel_id. Both mappings are unwound on
 * error.
 */
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

/* Release the tunnel-id mappings taken by mlx5e_get_flow_tunnel_id().
 * NOTE(review): this function is cut off at the end of this chunk; its
 * body continues past the last visible line.
 */
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv =
&uplink_rpriv->uplink_priv; 20760a7fcb78SPaul Blakey 20770a7fcb78SPaul Blakey if (tun_id) 20780a7fcb78SPaul Blakey mapping_remove(uplink_priv->tunnel_mapping, tun_id); 20790a7fcb78SPaul Blakey if (enc_opts_id) 20800a7fcb78SPaul Blakey mapping_remove(uplink_priv->tunnel_enc_opts_mapping, 20810a7fcb78SPaul Blakey enc_opts_id); 20820a7fcb78SPaul Blakey } 20830a7fcb78SPaul Blakey 2084fca53304SEli Britstein void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, 2085fca53304SEli Britstein struct flow_match_basic *match, bool outer, 2086fca53304SEli Britstein void *headers_c, void *headers_v) 20874a5d5d73SEli Britstein { 2088fca53304SEli Britstein bool ip_version_cap; 2089fca53304SEli Britstein 2090fca53304SEli Britstein ip_version_cap = outer ? 2091fca53304SEli Britstein MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2092fca53304SEli Britstein ft_field_support.outer_ip_version) : 2093fca53304SEli Britstein MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2094fca53304SEli Britstein ft_field_support.inner_ip_version); 2095fca53304SEli Britstein 2096fca53304SEli Britstein if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) && 2097fca53304SEli Britstein (match->key->n_proto == htons(ETH_P_IP) || 2098fca53304SEli Britstein match->key->n_proto == htons(ETH_P_IPV6))) { 2099fca53304SEli Britstein MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version); 2100fca53304SEli Britstein MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 2101fca53304SEli Britstein match->key->n_proto == htons(ETH_P_IP) ? 
			 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}

/* Derive the IP version (4 or 6) from a flow spec's outer or inner header
 * match values. Falls back to translating the ethertype when the
 * ip_version field itself was not set in the spec; returns 0 if neither
 * yields a version.
 */
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}

/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
 * And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
 * +---------+----------------------------------------+
 * |Arriving | Arriving Outer Header                  |
 * |   Inner +---------+---------+---------+----------+
 * |  Header | Not-ECT | ECT(0)  | ECT(1)  |   CE     |
 * +---------+---------+---------+---------+----------+
 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>   |
 * |  ECT(0) |  ECT(0) | ECT(0)  | ECT(1)  |   CE*    |
 * |  ECT(1) |  ECT(1) | ECT(1)  | ECT(1)* |   CE*    |
 * |    CE   |   CE    |  CE     |  CE     |   CE     |
 * +---------+---------+---------+---------+----------+
 *
 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
 * the inner ip_ecn value before hardware decap action.
 *
 * Cells marked are changed from original inner packet ip_ecn value during decap, and
 * so matching those values on inner ip_ecn before decap will fail.
 *
 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
 * except for the outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
 * and such we can drop the inner ip_ecn=CE match.
 */

/* Validate that the filter's ECN matches (inner tos vs. enc_tos) can be
 * offloaded given the RFC 6040 re-marking above. On success sets
 * *match_inner_ecn: false means the caller must drop the inner ECN match
 * (outer = inner = CE case); true means it may be kept as-is.
 * Returns -EOPNOTSUPP for combinations hardware cannot match correctly.
 */
static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
				      struct flow_cls_offload *f,
				      bool *match_inner_ecn)
{
	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ip match;

	*match_inner_ecn = true;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		flow_rule_match_enc_ip(rule, &match);
		outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		flow_rule_match_ip(rule, &match);
		inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	/* Only full (both-bit) or no ECN match is supported on the outer header. */
	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		/* Inner ECN alone is ambiguous: hw sees pre-decap inner ECN. */
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;

	/* Both inner and outer have full mask on ecn */

	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn might change by DECAP action */

		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in software, as packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE, as decap will change inner ecn to CE in anycase,
	 * drop match on inner ecn
	 */
	*match_inner_ecn = false;

	return 0;
}

/* Parse the tunnel (encap) portion of a tc flower filter into @spec.
 * For chain 0 the tunnel headers are matched directly (and a DECAP action
 * is added, except for bareudp/MPLSoUDP which decaps via packet reformat);
 * for chain > 0 the tunnel match is deferred to a register value set up by
 * mlx5e_get_flow_tunnel_id(). *match_inner reports whether the caller
 * should parse the remaining keys against the inner headers.
 */
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	/* Tunnel matching is only offloaded on eswitch (FDB) flows. */
	if (!mlx5e_is_eswitch_flow(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
		return -EOPNOTSUPP;
	}

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		/* Chain > 0: parse into a scratch spec so the real spec is
		 * not polluted with outer-header matches (the tunnel is
		 * matched via the mapped register instead); the scratch spec
		 * is only used to extract rx tunnel attributes.
		 */
		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}

/* Accessors for the inner/outer header sections of a flow spec's match
 * criteria (mask) and match value buffers.
 */
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

/* Select inner vs. outer header match values: a DECAP action means the
 * relevant headers (post-decap) are the inner ones.
 */
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}

/* Validate an optional META (ingress ifindex) match: when present, it must
 * be an exact-mask match on the filter device itself. Returns 0 when the
 * key is absent or matches, an errno otherwise.
 */
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	/* A zero mask means the filter doesn't actually match on ifindex. */
	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Decide whether the BASIC (ethertype/ip_proto) key should be ignored for
 * this filter device.
 */
static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields. However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}

/* Translate a tc flower filter's dissector keys into an mlx5 flow spec.
 * Rejects filters using keys the driver doesn't support, handles tunnel
 * (encap) matching via parse_tunnel_attr(), then fills L2/VLAN/L3/...
 * match fields. *inner_match_level / *outer_match_level report the deepest
 * header layer matched on each side (MLX5_MATCH_NONE/L2/L3/...).
 */
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				      misc_parameters_3);
	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				      misc_parameters_3);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	enum fs_flow_table_type fs_type;
	bool match_inner_ecn = true;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
	match_level = outer_match_level;

	/* Reject any dissector key outside the supported set. */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}

		err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
		if (err)
			return err;
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			/* Filter attached to a VLAN upper: synthesize an
			 * exact-match key from the device's VLAN config
			 * (priority left as don't-care).
			 */
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;

			/* Matching vlan_eth_type without a CVLAN key means
			 * "no second VLAN tag"; express that via the misc
			 * outer_second_cvlan_tag criteria when supported.
			 */
			if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
			    match.mask->vlan_eth_type &&
			    MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
						    ft_field_support.outer_second_vid,
						    fs_type)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				spec->match_criteria_enable |=
					MLX5_MATCH_MISC_PARAMETERS;
			}
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
						     fs_type)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Matching on CVLAN is not supported");
				return -EOPNOTSUPP;
			}

			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));
Neira Ayuso if (match.mask->src || match.mask->dst) 2693d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 2694e3a2b7edSAmir Vadai } 2695e3a2b7edSAmir Vadai 2696e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 26978f256622SPablo Neira Ayuso struct flow_match_ipv6_addrs match; 2698e3a2b7edSAmir Vadai 26998f256622SPablo Neira Ayuso flow_rule_match_ipv6_addrs(rule, &match); 2700e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2701e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 27028f256622SPablo Neira Ayuso &match.mask->src, sizeof(match.mask->src)); 2703e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2704e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 27058f256622SPablo Neira Ayuso &match.key->src, sizeof(match.key->src)); 2706e3a2b7edSAmir Vadai 2707e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2708e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 27098f256622SPablo Neira Ayuso &match.mask->dst, sizeof(match.mask->dst)); 2710e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2711e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 27128f256622SPablo Neira Ayuso &match.key->dst, sizeof(match.key->dst)); 2713de0af0bfSRoi Dayan 27148f256622SPablo Neira Ayuso if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || 27158f256622SPablo Neira Ayuso ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) 2716d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 2717e3a2b7edSAmir Vadai } 2718e3a2b7edSAmir Vadai 27198f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 27208f256622SPablo Neira Ayuso struct flow_match_ip match; 27211f97a526SOr Gerlitz 27228f256622SPablo Neira Ayuso flow_rule_match_ip(rule, &match); 2723b6dfff21SPaul Blakey if (match_inner_ecn) { 27248f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 27258f256622SPablo Neira Ayuso match.mask->tos & 0x3); 
27268f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 27278f256622SPablo Neira Ayuso match.key->tos & 0x3); 2728b6dfff21SPaul Blakey } 27291f97a526SOr Gerlitz 27308f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 27318f256622SPablo Neira Ayuso match.mask->tos >> 2); 27328f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 27338f256622SPablo Neira Ayuso match.key->tos >> 2); 27341f97a526SOr Gerlitz 27358f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 27368f256622SPablo Neira Ayuso match.mask->ttl); 27378f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 27388f256622SPablo Neira Ayuso match.key->ttl); 27391f97a526SOr Gerlitz 27408f256622SPablo Neira Ayuso if (match.mask->ttl && 2741a8ade55fSOr Gerlitz !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, 2742e98bedf5SEli Britstein ft_field_support.outer_ipv4_ttl)) { 2743e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2744e98bedf5SEli Britstein "Matching on TTL is not supported"); 27451f97a526SOr Gerlitz return -EOPNOTSUPP; 2746e98bedf5SEli Britstein } 2747a8ade55fSOr Gerlitz 27488f256622SPablo Neira Ayuso if (match.mask->tos || match.mask->ttl) 2749d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 27501f97a526SOr Gerlitz } 27511f97a526SOr Gerlitz 275254782900SOr Gerlitz /* *** L3 attributes parsing up to here *** */ 275354782900SOr Gerlitz 27548f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 27558f256622SPablo Neira Ayuso struct flow_match_ports match; 27568f256622SPablo Neira Ayuso 27578f256622SPablo Neira Ayuso flow_rule_match_ports(rule, &match); 2758e3a2b7edSAmir Vadai switch (ip_proto) { 2759e3a2b7edSAmir Vadai case IPPROTO_TCP: 2760e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 27618f256622SPablo Neira Ayuso tcp_sport, ntohs(match.mask->src)); 2762e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 
27638f256622SPablo Neira Ayuso tcp_sport, ntohs(match.key->src)); 2764e3a2b7edSAmir Vadai 2765e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 27668f256622SPablo Neira Ayuso tcp_dport, ntohs(match.mask->dst)); 2767e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 27688f256622SPablo Neira Ayuso tcp_dport, ntohs(match.key->dst)); 2769e3a2b7edSAmir Vadai break; 2770e3a2b7edSAmir Vadai 2771e3a2b7edSAmir Vadai case IPPROTO_UDP: 2772e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 27738f256622SPablo Neira Ayuso udp_sport, ntohs(match.mask->src)); 2774e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 27758f256622SPablo Neira Ayuso udp_sport, ntohs(match.key->src)); 2776e3a2b7edSAmir Vadai 2777e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 27788f256622SPablo Neira Ayuso udp_dport, ntohs(match.mask->dst)); 2779e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 27808f256622SPablo Neira Ayuso udp_dport, ntohs(match.key->dst)); 2781e3a2b7edSAmir Vadai break; 2782e3a2b7edSAmir Vadai default: 2783e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2784e98bedf5SEli Britstein "Only UDP and TCP transports are supported for L4 matching"); 2785e3a2b7edSAmir Vadai netdev_err(priv->netdev, 2786e3a2b7edSAmir Vadai "Only UDP and TCP transport are supported\n"); 2787e3a2b7edSAmir Vadai return -EINVAL; 2788e3a2b7edSAmir Vadai } 2789de0af0bfSRoi Dayan 27908f256622SPablo Neira Ayuso if (match.mask->src || match.mask->dst) 2791d708f902SOr Gerlitz *match_level = MLX5_MATCH_L4; 2792e3a2b7edSAmir Vadai } 2793e3a2b7edSAmir Vadai 27948f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { 27958f256622SPablo Neira Ayuso struct flow_match_tcp match; 2796e77834ecSOr Gerlitz 27978f256622SPablo Neira Ayuso flow_rule_match_tcp(rule, &match); 2798e77834ecSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, 27998f256622SPablo Neira Ayuso ntohs(match.mask->flags)); 2800e77834ecSOr 
Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 28018f256622SPablo Neira Ayuso ntohs(match.key->flags)); 2802e77834ecSOr Gerlitz 28038f256622SPablo Neira Ayuso if (match.mask->flags) 2804d708f902SOr Gerlitz *match_level = MLX5_MATCH_L4; 2805e77834ecSOr Gerlitz } 2806a3222a2dSMaor Dickman if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { 2807a3222a2dSMaor Dickman struct flow_match_icmp match; 2808e77834ecSOr Gerlitz 2809a3222a2dSMaor Dickman flow_rule_match_icmp(rule, &match); 2810a3222a2dSMaor Dickman switch (ip_proto) { 2811a3222a2dSMaor Dickman case IPPROTO_ICMP: 2812a3222a2dSMaor Dickman if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 28130885ae1aSAbhiram R N MLX5_FLEX_PROTO_ICMP)) { 28140885ae1aSAbhiram R N NL_SET_ERR_MSG_MOD(extack, 28150885ae1aSAbhiram R N "Match on Flex protocols for ICMP is not supported"); 2816a3222a2dSMaor Dickman return -EOPNOTSUPP; 28170885ae1aSAbhiram R N } 2818a3222a2dSMaor Dickman MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, 2819a3222a2dSMaor Dickman match.mask->type); 2820a3222a2dSMaor Dickman MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, 2821a3222a2dSMaor Dickman match.key->type); 2822a3222a2dSMaor Dickman MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code, 2823a3222a2dSMaor Dickman match.mask->code); 2824a3222a2dSMaor Dickman MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code, 2825a3222a2dSMaor Dickman match.key->code); 2826a3222a2dSMaor Dickman break; 2827a3222a2dSMaor Dickman case IPPROTO_ICMPV6: 2828a3222a2dSMaor Dickman if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 28290885ae1aSAbhiram R N MLX5_FLEX_PROTO_ICMPV6)) { 28300885ae1aSAbhiram R N NL_SET_ERR_MSG_MOD(extack, 28310885ae1aSAbhiram R N "Match on Flex protocols for ICMPV6 is not supported"); 2832a3222a2dSMaor Dickman return -EOPNOTSUPP; 28330885ae1aSAbhiram R N } 2834a3222a2dSMaor Dickman MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, 2835a3222a2dSMaor Dickman match.mask->type); 2836a3222a2dSMaor Dickman 
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
				 match.key->type);
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
				 match.mask->code);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
				 match.key->code);
			break;
		default:
			/* ICMP type/code matching is meaningless for any other
			 * L4 protocol - reject instead of silently ignoring.
			 */
			NL_SET_ERR_MSG_MOD(extack,
					   "Code and type matching only with ICMP and ICMPv6");
			netdev_err(priv->netdev,
				   "Code and type matching only with ICMP and ICMPv6\n");
			return -EINVAL;
		}
		if (match.mask->code || match.mask->type) {
			/* ICMP fields live in misc3 - enable that match
			 * criteria group only when something is actually
			 * matched on.
			 */
			*match_level = MLX5_MATCH_L4;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
		}
	}
	/* Currently supported only for MPLS over UDP */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
	    !netif_is_bareudp(filter_dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on MPLS is supported only for MPLS over UDP");
		netdev_err(priv->netdev,
			   "Matching on MPLS is supported only for MPLS over UDP\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* parse_cls_flower() - translate a flower classifier into an mlx5 flow spec.
 *
 * Wraps __parse_cls_flower() and then, for eswitch (FDB) flows that do not
 * originate from the uplink vport, verifies that the eswitch inline mode is
 * deep enough for the strictest (non-tunnel) match level the rule needs.
 * The resulting inner/outer match levels are recorded on flow->attr.
 *
 * Returns 0 on success, -EOPNOTSUPP if the min-inline setting is too
 * shallow, or the error propagated from __parse_cls_flower().
 */
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
{
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;
	int err;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
				 &inner_match_level, &outer_match_level);
	/* For tunnel rules the inner headers dictate the required inline
	 * depth; otherwise the outer headers do.
	 */
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
				 outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		/* Uplink vport is exempt from the min-inline restriction */
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	flow->attr->inner_match_level = inner_match_level;
	flow->attr->outer_match_level = outer_match_level;

	return err;
}

/* Descriptor of one header field that pedit can rewrite and that can be
 * cross-checked against the flow's match value (see offload_pedit_fields()).
 */
struct mlx5_fields {
	u8  field;		/* MLX5_ACTION_IN_FIELD_OUT_* id for modify-header */
	u8  field_bsize;	/* field width in bits: 8, 16 or 32 */
	u32 field_mask;		/* mask of the bits this entry covers */
	u32 offset;		/* byte offset into struct pedit_headers */
	u32 match_offset;	/* byte offset into fte_match_set_lyr_2_4 */
};

/* Build one struct mlx5_fields entry from a firmware field name, a
 * pedit_headers member and the corresponding match-param field.
 */
#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}

/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

/* cmp_val_mask() - check whether a pedit rewrite is a no-op w.r.t. the match.
 *
 * Dispatches SAME_VAL_MASK() on the field's bit width. Returns true when the
 * rewrite value/mask equals what the flow already matches on (and the rewrite
 * mask does not touch bits outside the match mask), i.e. the rewrite can be
 * skipped. Unknown widths fall through and return false.
 */
static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, u8 bsize)
{
	bool same = false;

	switch (bsize) {
	case 8:
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case 16:
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case 32:
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}

	return same;
}

/* Table of all header fields the driver can offload a pedit rewrite for,
 * walked in order by offload_pedit_fields().
 */
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	/* 0xfc: DSCP occupies the upper 6 bits of the IPv4 tos byte */
	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
	/* 0xc00f: the IPv6 traffic-class (DSCP) bits straddle the first two
	 * bytes of the header, hence the 16-bit width and split mask.
	 */
	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
	/* in linux iphdr tcp_flags is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};

/* mask_to_le() - convert a network-order field mask to little-endian so the
 * bitops (find_first_bit() etc.) see the bits in device order. 8-bit (and
 * other) sizes need no conversion and are returned unchanged.
 */
static unsigned long mask_to_le(unsigned long mask, int size)
{
	__be32 mask_be32;
	__be16 mask_be16;

	if (size == 32) {
		mask_be32 = (__force __be32)(mask);
		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
	} else if (size == 16) {
		mask_be32 = (__force __be32)(mask);
		mask_be16 = *(__be16 *)&mask_be32;
		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
	}

	return mask;
}

/* offload_pedit_fields() - convert accumulated pedit set/add masks into
 * mlx5 modify-header actions.
 *
 * Walks the fields[] table; for every field with a pending SET or ADD mask
 * it emits one modify-header action into parse_attr->mod_hdr_acts, skipping
 * rewrites that are no-ops (SET to an already-matched value, ADD of zero).
 * Consumed bits are cleared from the hdrs masks so leftovers can later be
 * detected by verify_offload_pedit_fields().
 *
 * Returns 0 on success; -EOPNOTSUPP when SET and ADD target the same field,
 * when the rewrite mask covers disjoint sub-fields, or when no more
 * modify-header actions can be allocated.
 */
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	struct pedit_headers_action *hdrs = parse_attr->hdrs;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	unsigned long mask, field_mask;
	int i, first, last, next_z;
	struct mlx5_fields *f;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);

	/* hdrs[0] holds TCA_PEDIT_KEY_EX_CMD_SET, hdrs[1] holds _ADD */
	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			netdev_warn(priv->netdev,
				    "mlx5: can't set and add to the same HW field (%x)\n",
				    f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}
		if (skip)
			continue;

		mask = mask_to_le(mask, f->field_bsize);

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last  = find_last_bit(&mask, f->field_bsize);
		/* a hole inside the mask means two disjoint sub-fields -
		 * one modify-header action can only write one contiguous run
		 */
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			netdev_warn(priv->netdev,
				    "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
				    mask);
			return -EOPNOTSUPP;
		}

		action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
		if (IS_ERR(action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return PTR_ERR(action);
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			int start;

			field_mask = mask_to_le(f->field_mask, f->field_bsize);

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit(&field_mask, f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		/* shift the (network-order) value down to the field's LSB */
		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}

/* All-zero reference used to detect leftover (unsupported) pedit bits */
static const struct pedit_headers zero_masks = {};

/* verify_offload_pedit_fields() - ensure every requested pedit bit was
 * consumed by offload_pedit_fields().
 *
 * Any mask bits still set after the offload pass target a field the driver
 * cannot rewrite; dump them and fail with -EOPNOTSUPP.
 */
static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
				       struct mlx5e_tc_flow_parse_attr *parse_attr,
				       struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	u8 cmd;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &parse_attr->hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/* alloc_tc_pedit_action() - build and validate the modify-header actions for
 * all parsed pedit commands; frees the partially built action list on error.
 */
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	err = verify_offload_pedit_fields(priv, parse_attr, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	return 0;

out_dealloc_parsed_actions:
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	return err;
}

/* Overlay for the 4-byte IPv4 header word that contains ttl, so a pedit
 * mask at the ttl offset can be inspected field by field.
 */
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

/* Overlay for the 4-byte IPv6 header word that contains hop_limit */
struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};

/* is_action_keys_supported() - classify one pedit mangle action.
 *
 * Sets *modify_ip_header when the rewrite touches more than just
 * ttl/hop_limit in the IPv4/IPv6 header word, and *modify_tuple when it
 * touches the 5-tuple (IP addresses or TCP/UDP ports).
 *
 * Returns false (with extack set) when the rewrite modifies the tuple while
 * a non-clear ct action is present - the offloaded connection tracking
 * could not be restored after such a rewrite.
 *
 * NOTE(review): the mask is inverted (mask = ~act->mangle.mask) before the
 * per-field checks - presumably pedit carries "keep" bits; confirm against
 * the tc pedit uapi.
 */
static bool
is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
			 bool *modify_ip_header, bool *modify_tuple,
			 struct netlink_ext_ack *extack)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			*modify_ip_header = true;
		}

		if (offset >= offsetof(struct iphdr, saddr))
			*modify_tuple = true;

		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return false;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
			*modify_tuple = true;

		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return false;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
		   htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
		*modify_tuple = true;
		if (ct_flow) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of transport header ports with action ct");
			return false;
		}
	}

	return true;
}

/* modify_tuple_supported() - decide whether a tuple rewrite can coexist with
 * the flow's conntrack state. Allowed trivially when the tuple is untouched
 * or ct(clear) is used; otherwise a ct_state=-trk match is added so the
 * changed tuple never needs ct-state restoration.
 */
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
				   bool ct_flow, struct netlink_ext_ack *extack,
				   struct mlx5e_priv *priv,
				   struct mlx5_flow_spec *spec)
{
	if (!modify_tuple || ct_clear)
		return true;

	if (ct_flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload tuple modification with non-clear ct()");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with non-clear ct()");
		return false;
	}

	/* Add ct_state=-trk match so it will be offloaded for non ct flows
	 * (or after clear action), as otherwise, since the tuple is changed,
	 * we can't restore ct state
	 */
	if (mlx5_tc_ct_add_no_trk_match(spec)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload tuple modification with ct matches and no ct(clear) action");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with ct matches and no ct(clear) action");
		return false;
	}
329696b5b458SDima Chumak 329796b5b458SDima Chumak return true; 329896b5b458SDima Chumak } 329996b5b458SDima Chumak 33003d486ec4SOz Shlomo static bool modify_header_match_supported(struct mlx5e_priv *priv, 33013d486ec4SOz Shlomo struct mlx5_flow_spec *spec, 330273867881SPablo Neira Ayuso struct flow_action *flow_action, 33034c3844d9SPaul Blakey u32 actions, bool ct_flow, 33047e36feebSPaul Blakey bool ct_clear, 3305e98bedf5SEli Britstein struct netlink_ext_ack *extack) 3306bdd66ac0SOr Gerlitz { 330773867881SPablo Neira Ayuso const struct flow_action_entry *act; 33087e36feebSPaul Blakey bool modify_ip_header, modify_tuple; 3309fca53304SEli Britstein void *headers_c; 3310bdd66ac0SOr Gerlitz void *headers_v; 3311bdd66ac0SOr Gerlitz u16 ethertype; 33128998576bSDmytro Linkin u8 ip_proto; 33131836d780SRoi Dayan int i; 3314bdd66ac0SOr Gerlitz 33158ee72638SRoi Dayan headers_c = mlx5e_get_match_headers_criteria(actions, spec); 33168ee72638SRoi Dayan headers_v = mlx5e_get_match_headers_value(actions, spec); 3317bdd66ac0SOr Gerlitz ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 3318bdd66ac0SOr Gerlitz 3319bdd66ac0SOr Gerlitz /* for non-IP we only re-write MACs, so we're okay */ 3320fca53304SEli Britstein if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 && 3321fca53304SEli Britstein ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) 3322bdd66ac0SOr Gerlitz goto out_ok; 3323bdd66ac0SOr Gerlitz 3324bdd66ac0SOr Gerlitz modify_ip_header = false; 33257e36feebSPaul Blakey modify_tuple = false; 332673867881SPablo Neira Ayuso flow_action_for_each(i, act, flow_action) { 332773867881SPablo Neira Ayuso if (act->id != FLOW_ACTION_MANGLE && 332873867881SPablo Neira Ayuso act->id != FLOW_ACTION_ADD) 3329bdd66ac0SOr Gerlitz continue; 3330bdd66ac0SOr Gerlitz 33311836d780SRoi Dayan if (!is_action_keys_supported(act, ct_flow, 33327e36feebSPaul Blakey &modify_ip_header, 33331836d780SRoi Dayan &modify_tuple, extack)) 33341836d780SRoi Dayan return false; 
3335bdd66ac0SOr Gerlitz } 3336bdd66ac0SOr Gerlitz 333796b5b458SDima Chumak if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, 333896b5b458SDima Chumak priv, spec)) 33397e36feebSPaul Blakey return false; 33407e36feebSPaul Blakey 3341bdd66ac0SOr Gerlitz ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 33421ccef350SJianbo Liu if (modify_ip_header && ip_proto != IPPROTO_TCP && 33431ccef350SJianbo Liu ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { 3344e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 3345e98bedf5SEli Britstein "can't offload re-write of non TCP/UDP"); 33463d486ec4SOz Shlomo netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n", 33473d486ec4SOz Shlomo ip_proto); 3348bdd66ac0SOr Gerlitz return false; 3349bdd66ac0SOr Gerlitz } 3350bdd66ac0SOr Gerlitz 3351bdd66ac0SOr Gerlitz out_ok: 3352bdd66ac0SOr Gerlitz return true; 3353bdd66ac0SOr Gerlitz } 3354bdd66ac0SOr Gerlitz 33559c1d3511SRoi Dayan static bool 33569c1d3511SRoi Dayan actions_match_supported_fdb(struct mlx5e_priv *priv, 33579c1d3511SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr, 33589c1d3511SRoi Dayan struct mlx5e_tc_flow *flow, 33599c1d3511SRoi Dayan struct netlink_ext_ack *extack) 33609c1d3511SRoi Dayan { 3361d4f401d9SRoi Dayan struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; 33629c1d3511SRoi Dayan bool ct_flow, ct_clear; 33639c1d3511SRoi Dayan 33649c1d3511SRoi Dayan ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; 33659c1d3511SRoi Dayan ct_flow = flow_flag_test(flow, CT) && !ct_clear; 33669c1d3511SRoi Dayan 3367d4f401d9SRoi Dayan if (esw_attr->split_count && ct_flow && 3368d4f401d9SRoi Dayan !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) { 33699c1d3511SRoi Dayan /* All registers used by ct are cleared when using 33709c1d3511SRoi Dayan * split rules. 
33719c1d3511SRoi Dayan */ 33729c1d3511SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct"); 33739c1d3511SRoi Dayan return false; 33749c1d3511SRoi Dayan } 33759c1d3511SRoi Dayan 3376d4f401d9SRoi Dayan if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 3377d4f401d9SRoi Dayan NL_SET_ERR_MSG_MOD(extack, 3378d4f401d9SRoi Dayan "current firmware doesn't support split rule for port mirroring"); 3379d4f401d9SRoi Dayan netdev_warn_once(priv->netdev, 3380d4f401d9SRoi Dayan "current firmware doesn't support split rule for port mirroring\n"); 3381d4f401d9SRoi Dayan return false; 3382d4f401d9SRoi Dayan } 3383d4f401d9SRoi Dayan 33849c1d3511SRoi Dayan return true; 33859c1d3511SRoi Dayan } 33869c1d3511SRoi Dayan 33879c1d3511SRoi Dayan static bool 33889c1d3511SRoi Dayan actions_match_supported(struct mlx5e_priv *priv, 338973867881SPablo Neira Ayuso struct flow_action *flow_action, 33900610f8dcSRoi Dayan u32 actions, 3391bdd66ac0SOr Gerlitz struct mlx5e_tc_flow_parse_attr *parse_attr, 3392e98bedf5SEli Britstein struct mlx5e_tc_flow *flow, 3393e98bedf5SEli Britstein struct netlink_ext_ack *extack) 3394bdd66ac0SOr Gerlitz { 33959c1d3511SRoi Dayan bool ct_flow, ct_clear; 3396bdd66ac0SOr Gerlitz 33979c1d3511SRoi Dayan ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; 3398a7c119bdSPaul Blakey ct_flow = flow_flag_test(flow, CT) && !ct_clear; 3399c620b772SAriel Levkovich 34006b50cf45SRoi Dayan if (!(actions & 34016b50cf45SRoi Dayan (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 34026b50cf45SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); 34036b50cf45SRoi Dayan return false; 34046b50cf45SRoi Dayan } 34056b50cf45SRoi Dayan 34065623ef8aSRoi Dayan if (!(~actions & 34075623ef8aSRoi Dayan (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 34085623ef8aSRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); 34095623ef8aSRoi Dayan 
return false; 34105623ef8aSRoi Dayan } 34115623ef8aSRoi Dayan 34129c1d3511SRoi Dayan if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3413a2446bc7SRoi Dayan actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3414a2446bc7SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); 3415a2446bc7SRoi Dayan return false; 3416a2446bc7SRoi Dayan } 3417a2446bc7SRoi Dayan 34183d65492aSRoi Dayan if (!(~actions & 34193d65492aSRoi Dayan (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 34203d65492aSRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); 34213d65492aSRoi Dayan return false; 34223d65492aSRoi Dayan } 34233d65492aSRoi Dayan 3424a2446bc7SRoi Dayan if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 342523216d38SRoi Dayan actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { 342623216d38SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); 342723216d38SRoi Dayan return false; 342823216d38SRoi Dayan } 342923216d38SRoi Dayan 34309c1d3511SRoi Dayan if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 34319c1d3511SRoi Dayan !modify_header_match_supported(priv, &parse_attr->spec, flow_action, 34329c1d3511SRoi Dayan actions, ct_flow, ct_clear, extack)) 343349397b80SDan Carpenter return false; 3434bdd66ac0SOr Gerlitz 34359c1d3511SRoi Dayan if (mlx5e_is_eswitch_flow(flow) && 34369c1d3511SRoi Dayan !actions_match_supported_fdb(priv, parse_attr, flow, extack)) 34379c1d3511SRoi Dayan return false; 3438bdd66ac0SOr Gerlitz 3439bdd66ac0SOr Gerlitz return true; 3440bdd66ac0SOr Gerlitz } 3441bdd66ac0SOr Gerlitz 344232134847SMaor Dickman static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 344332134847SMaor Dickman { 344432134847SMaor Dickman return priv->mdev == peer_priv->mdev; 344532134847SMaor Dickman } 344632134847SMaor Dickman 3447ab3f3d5eSRoi Dayan bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 34485c65c564SOr Gerlitz { 
34495c65c564SOr Gerlitz struct mlx5_core_dev *fmdev, *pmdev; 3450816f6706SOr Gerlitz u64 fsystem_guid, psystem_guid; 34515c65c564SOr Gerlitz 34525c65c564SOr Gerlitz fmdev = priv->mdev; 34535c65c564SOr Gerlitz pmdev = peer_priv->mdev; 34545c65c564SOr Gerlitz 345559c9d35eSAlaa Hleihel fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); 345659c9d35eSAlaa Hleihel psystem_guid = mlx5_query_nic_system_image_guid(pmdev); 34575c65c564SOr Gerlitz 3458816f6706SOr Gerlitz return (fsystem_guid == psystem_guid); 34595c65c564SOr Gerlitz } 34605c65c564SOr Gerlitz 34610bac1194SEli Britstein static int 3462d9581e2fSRoi Dayan actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, 3463d9581e2fSRoi Dayan struct mlx5e_tc_flow *flow, 3464d9581e2fSRoi Dayan struct mlx5_flow_attr *attr, 3465d9581e2fSRoi Dayan struct netlink_ext_ack *extack) 3466d9581e2fSRoi Dayan { 3467d9581e2fSRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; 346809bf9792SRoi Dayan struct pedit_headers_action *hdrs = parse_attr->hdrs; 3469d9581e2fSRoi Dayan enum mlx5_flow_namespace_type ns_type; 3470d9581e2fSRoi Dayan int err; 3471d9581e2fSRoi Dayan 3472d9581e2fSRoi Dayan if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits && 3473d9581e2fSRoi Dayan !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) 3474d9581e2fSRoi Dayan return 0; 3475d9581e2fSRoi Dayan 3476e36db1eeSRoi Dayan ns_type = mlx5e_get_flow_namespace(flow); 3477d9581e2fSRoi Dayan 347809bf9792SRoi Dayan err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack); 3479d9581e2fSRoi Dayan if (err) 3480d9581e2fSRoi Dayan return err; 3481d9581e2fSRoi Dayan 3482d9581e2fSRoi Dayan if (parse_attr->mod_hdr_acts.num_actions > 0) 3483d9581e2fSRoi Dayan return 0; 3484d9581e2fSRoi Dayan 3485fc3a879aSRoi Dayan /* In case all pedit actions are skipped, remove the MOD_HDR flag. 
*/ 3486d9581e2fSRoi Dayan attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 34872c0e5cf5SPaul Blakey mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 3488d9581e2fSRoi Dayan 3489d9581e2fSRoi Dayan if (ns_type != MLX5_FLOW_NAMESPACE_FDB) 3490d9581e2fSRoi Dayan return 0; 3491d9581e2fSRoi Dayan 3492d9581e2fSRoi Dayan if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3493d9581e2fSRoi Dayan (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) 3494d9581e2fSRoi Dayan attr->esw_attr->split_count = 0; 3495d9581e2fSRoi Dayan 3496d9581e2fSRoi Dayan return 0; 3497d9581e2fSRoi Dayan } 3498d9581e2fSRoi Dayan 34998300f225SRoi Dayan static struct mlx5_flow_attr* 35008300f225SRoi Dayan mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, 35018300f225SRoi Dayan enum mlx5_flow_namespace_type ns_type) 35028300f225SRoi Dayan { 35038300f225SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 35048300f225SRoi Dayan u32 attr_sz = ns_to_attr_sz(ns_type); 35058300f225SRoi Dayan struct mlx5_flow_attr *attr2; 35068300f225SRoi Dayan 35078300f225SRoi Dayan attr2 = mlx5_alloc_flow_attr(ns_type); 35088300f225SRoi Dayan parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); 35098300f225SRoi Dayan if (!attr2 || !parse_attr) { 35108300f225SRoi Dayan kvfree(parse_attr); 35118300f225SRoi Dayan kfree(attr2); 3512371c2b34SDan Carpenter return NULL; 35138300f225SRoi Dayan } 35148300f225SRoi Dayan 35158300f225SRoi Dayan memcpy(attr2, attr, attr_sz); 35168300f225SRoi Dayan INIT_LIST_HEAD(&attr2->list); 35178300f225SRoi Dayan parse_attr->filter_dev = attr->parse_attr->filter_dev; 35188300f225SRoi Dayan attr2->action = 0; 35198300f225SRoi Dayan attr2->flags = 0; 35208300f225SRoi Dayan attr2->parse_attr = parse_attr; 35218300f225SRoi Dayan return attr2; 35228300f225SRoi Dayan } 35238300f225SRoi Dayan 35248300f225SRoi Dayan static struct mlx5_core_dev * 35258300f225SRoi Dayan get_flow_counter_dev(struct mlx5e_tc_flow *flow) 35268300f225SRoi Dayan { 35278300f225SRoi Dayan return 
mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev; 35288300f225SRoi Dayan } 35298300f225SRoi Dayan 35308300f225SRoi Dayan struct mlx5_flow_attr * 35318300f225SRoi Dayan mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow) 35328300f225SRoi Dayan { 35338300f225SRoi Dayan struct mlx5_esw_flow_attr *esw_attr; 35348300f225SRoi Dayan struct mlx5_flow_attr *attr; 35358300f225SRoi Dayan int i; 35368300f225SRoi Dayan 35378300f225SRoi Dayan list_for_each_entry(attr, &flow->attrs, list) { 35388300f225SRoi Dayan esw_attr = attr->esw_attr; 35398300f225SRoi Dayan for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { 35408300f225SRoi Dayan if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) 35418300f225SRoi Dayan return attr; 35428300f225SRoi Dayan } 35438300f225SRoi Dayan } 35448300f225SRoi Dayan 35458300f225SRoi Dayan return NULL; 35468300f225SRoi Dayan } 35478300f225SRoi Dayan 35488300f225SRoi Dayan void 35498300f225SRoi Dayan mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow) 35508300f225SRoi Dayan { 35518300f225SRoi Dayan struct mlx5e_post_act *post_act = get_post_action(flow->priv); 35528300f225SRoi Dayan struct mlx5_flow_attr *attr; 35538300f225SRoi Dayan 35548300f225SRoi Dayan list_for_each_entry(attr, &flow->attrs, list) { 35558300f225SRoi Dayan if (list_is_last(&attr->list, &flow->attrs)) 35568300f225SRoi Dayan break; 35578300f225SRoi Dayan 35588300f225SRoi Dayan mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle); 35598300f225SRoi Dayan } 35608300f225SRoi Dayan } 35618300f225SRoi Dayan 35628300f225SRoi Dayan static void 35638300f225SRoi Dayan free_flow_post_acts(struct mlx5e_tc_flow *flow) 35648300f225SRoi Dayan { 35658300f225SRoi Dayan struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow); 35668300f225SRoi Dayan struct mlx5e_post_act *post_act = get_post_action(flow->priv); 35678300f225SRoi Dayan struct mlx5_flow_attr *attr, *tmp; 35688300f225SRoi Dayan bool vf_tun; 35698300f225SRoi Dayan 35708300f225SRoi Dayan 
list_for_each_entry_safe(attr, tmp, &flow->attrs, list) { 35718300f225SRoi Dayan if (list_is_last(&attr->list, &flow->attrs)) 35728300f225SRoi Dayan break; 35738300f225SRoi Dayan 35748300f225SRoi Dayan if (attr->post_act_handle) 35758300f225SRoi Dayan mlx5e_tc_post_act_del(post_act, attr->post_act_handle); 35768300f225SRoi Dayan 35778300f225SRoi Dayan clean_encap_dests(flow->priv, flow, attr, &vf_tun); 35788300f225SRoi Dayan 35798300f225SRoi Dayan if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 35808300f225SRoi Dayan mlx5_fc_destroy(counter_dev, attr->counter); 35818300f225SRoi Dayan 35828300f225SRoi Dayan if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 35838300f225SRoi Dayan mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); 35848300f225SRoi Dayan if (attr->modify_hdr) 35858300f225SRoi Dayan mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr); 35868300f225SRoi Dayan } 35878300f225SRoi Dayan 35888300f225SRoi Dayan list_del(&attr->list); 35898300f225SRoi Dayan kvfree(attr->parse_attr); 35908300f225SRoi Dayan kfree(attr); 35918300f225SRoi Dayan } 35928300f225SRoi Dayan } 35938300f225SRoi Dayan 35948300f225SRoi Dayan int 35958300f225SRoi Dayan mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow) 35968300f225SRoi Dayan { 35978300f225SRoi Dayan struct mlx5e_post_act *post_act = get_post_action(flow->priv); 35988300f225SRoi Dayan struct mlx5_flow_attr *attr; 35998300f225SRoi Dayan int err = 0; 36008300f225SRoi Dayan 36018300f225SRoi Dayan list_for_each_entry(attr, &flow->attrs, list) { 36028300f225SRoi Dayan if (list_is_last(&attr->list, &flow->attrs)) 36038300f225SRoi Dayan break; 36048300f225SRoi Dayan 36058300f225SRoi Dayan err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle); 36068300f225SRoi Dayan if (err) 36078300f225SRoi Dayan break; 36088300f225SRoi Dayan } 36098300f225SRoi Dayan 36108300f225SRoi Dayan return err; 36118300f225SRoi Dayan } 36128300f225SRoi Dayan 36138300f225SRoi Dayan /* TC filter rule HW translation: 
36148300f225SRoi Dayan * 36158300f225SRoi Dayan * +---------------------+ 36168300f225SRoi Dayan * + ft prio (tc chain) + 36178300f225SRoi Dayan * + original match + 36188300f225SRoi Dayan * +---------------------+ 36198300f225SRoi Dayan * | 36208300f225SRoi Dayan * | if multi table action 36218300f225SRoi Dayan * | 36228300f225SRoi Dayan * v 36238300f225SRoi Dayan * +---------------------+ 36248300f225SRoi Dayan * + post act ft |<----. 36258300f225SRoi Dayan * + match fte id | | split on multi table action 36268300f225SRoi Dayan * + do actions |-----' 36278300f225SRoi Dayan * +---------------------+ 36288300f225SRoi Dayan * | 36298300f225SRoi Dayan * | 36308300f225SRoi Dayan * v 36318300f225SRoi Dayan * Do rest of the actions after last multi table action. 36328300f225SRoi Dayan */ 36338300f225SRoi Dayan static int 36348300f225SRoi Dayan alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) 36358300f225SRoi Dayan { 36368300f225SRoi Dayan struct mlx5e_post_act *post_act = get_post_action(flow->priv); 36378300f225SRoi Dayan struct mlx5_flow_attr *attr, *next_attr = NULL; 36388300f225SRoi Dayan struct mlx5e_post_act_handle *handle; 36398300f225SRoi Dayan bool vf_tun, encap_valid = true; 36408300f225SRoi Dayan int err; 36418300f225SRoi Dayan 36428300f225SRoi Dayan /* This is going in reverse order as needed. 36438300f225SRoi Dayan * The first entry is the last attribute. 36448300f225SRoi Dayan */ 36458300f225SRoi Dayan list_for_each_entry(attr, &flow->attrs, list) { 36468300f225SRoi Dayan if (!next_attr) { 36478300f225SRoi Dayan /* Set counter action on last post act rule. 
*/ 36488300f225SRoi Dayan attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 36498300f225SRoi Dayan } else { 36508300f225SRoi Dayan err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr); 36518300f225SRoi Dayan if (err) 36528300f225SRoi Dayan goto out_free; 36538300f225SRoi Dayan } 36548300f225SRoi Dayan 36558300f225SRoi Dayan /* Don't add post_act rule for first attr (last in the list). 36568300f225SRoi Dayan * It's being handled by the caller. 36578300f225SRoi Dayan */ 36588300f225SRoi Dayan if (list_is_last(&attr->list, &flow->attrs)) 36598300f225SRoi Dayan break; 36608300f225SRoi Dayan 36618300f225SRoi Dayan err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun); 36628300f225SRoi Dayan if (err) 36638300f225SRoi Dayan goto out_free; 36648300f225SRoi Dayan 36658300f225SRoi Dayan if (!encap_valid) 36668300f225SRoi Dayan flow_flag_set(flow, SLOW); 36678300f225SRoi Dayan 36688300f225SRoi Dayan err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack); 36698300f225SRoi Dayan if (err) 36708300f225SRoi Dayan goto out_free; 36718300f225SRoi Dayan 36728300f225SRoi Dayan if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 36738300f225SRoi Dayan err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr); 36748300f225SRoi Dayan if (err) 36758300f225SRoi Dayan goto out_free; 36768300f225SRoi Dayan } 36778300f225SRoi Dayan 36788300f225SRoi Dayan if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 36798300f225SRoi Dayan err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr); 36808300f225SRoi Dayan if (err) 36818300f225SRoi Dayan goto out_free; 36828300f225SRoi Dayan } 36838300f225SRoi Dayan 36848300f225SRoi Dayan handle = mlx5e_tc_post_act_add(post_act, attr); 36858300f225SRoi Dayan if (IS_ERR(handle)) { 36868300f225SRoi Dayan err = PTR_ERR(handle); 36878300f225SRoi Dayan goto out_free; 36888300f225SRoi Dayan } 36898300f225SRoi Dayan 36908300f225SRoi Dayan attr->post_act_handle = handle; 36918300f225SRoi Dayan next_attr = attr; 
36928300f225SRoi Dayan } 36938300f225SRoi Dayan 36948300f225SRoi Dayan if (flow_flag_test(flow, SLOW)) 36958300f225SRoi Dayan goto out; 36968300f225SRoi Dayan 36978300f225SRoi Dayan err = mlx5e_tc_offload_flow_post_acts(flow); 36988300f225SRoi Dayan if (err) 36998300f225SRoi Dayan goto out_free; 37008300f225SRoi Dayan 37018300f225SRoi Dayan out: 37028300f225SRoi Dayan return 0; 37038300f225SRoi Dayan 37048300f225SRoi Dayan out_free: 37058300f225SRoi Dayan free_flow_post_acts(flow); 37068300f225SRoi Dayan return err; 37078300f225SRoi Dayan } 37088300f225SRoi Dayan 37098300f225SRoi Dayan static int 37108300f225SRoi Dayan parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, 37118300f225SRoi Dayan struct flow_action *flow_action) 37128300f225SRoi Dayan { 37138300f225SRoi Dayan struct netlink_ext_ack *extack = parse_state->extack; 37148300f225SRoi Dayan struct mlx5e_tc_flow_action flow_action_reorder; 37158300f225SRoi Dayan struct mlx5e_tc_flow *flow = parse_state->flow; 37168300f225SRoi Dayan struct mlx5_flow_attr *attr = flow->attr; 37178300f225SRoi Dayan enum mlx5_flow_namespace_type ns_type; 37188300f225SRoi Dayan struct mlx5e_priv *priv = flow->priv; 37198300f225SRoi Dayan struct flow_action_entry *act, **_act; 37208300f225SRoi Dayan struct mlx5e_tc_act *tc_act; 37218300f225SRoi Dayan int err, i; 37228300f225SRoi Dayan 37238300f225SRoi Dayan flow_action_reorder.num_entries = flow_action->num_entries; 37248300f225SRoi Dayan flow_action_reorder.entries = kcalloc(flow_action->num_entries, 37258300f225SRoi Dayan sizeof(flow_action), GFP_KERNEL); 37268300f225SRoi Dayan if (!flow_action_reorder.entries) 37278300f225SRoi Dayan return -ENOMEM; 37288300f225SRoi Dayan 37298300f225SRoi Dayan mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder); 37308300f225SRoi Dayan 37318300f225SRoi Dayan ns_type = mlx5e_get_flow_namespace(flow); 37328300f225SRoi Dayan list_add(&attr->list, &flow->attrs); 37338300f225SRoi Dayan 37348300f225SRoi Dayan 
flow_action_for_each(i, _act, &flow_action_reorder) { 37358300f225SRoi Dayan act = *_act; 37368300f225SRoi Dayan tc_act = mlx5e_tc_act_get(act->id, ns_type); 37378300f225SRoi Dayan if (!tc_act) { 37388300f225SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); 37398300f225SRoi Dayan err = -EOPNOTSUPP; 37408300f225SRoi Dayan goto out_free; 37418300f225SRoi Dayan } 37428300f225SRoi Dayan 37438300f225SRoi Dayan if (!tc_act->can_offload(parse_state, act, i, attr)) { 37448300f225SRoi Dayan err = -EOPNOTSUPP; 37458300f225SRoi Dayan goto out_free; 37468300f225SRoi Dayan } 37478300f225SRoi Dayan 37488300f225SRoi Dayan err = tc_act->parse_action(parse_state, act, priv, attr); 37498300f225SRoi Dayan if (err) 37508300f225SRoi Dayan goto out_free; 37518300f225SRoi Dayan 37528300f225SRoi Dayan parse_state->actions |= attr->action; 37538300f225SRoi Dayan 37548300f225SRoi Dayan /* Split attr for multi table act if not the last act. */ 37558300f225SRoi Dayan if (tc_act->is_multi_table_act && 37568300f225SRoi Dayan tc_act->is_multi_table_act(priv, act, attr) && 37578300f225SRoi Dayan i < flow_action_reorder.num_entries - 1) { 37588300f225SRoi Dayan err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); 37598300f225SRoi Dayan if (err) 37608300f225SRoi Dayan goto out_free; 37618300f225SRoi Dayan 37628300f225SRoi Dayan attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type); 37638300f225SRoi Dayan if (!attr) { 37648300f225SRoi Dayan err = -ENOMEM; 37658300f225SRoi Dayan goto out_free; 37668300f225SRoi Dayan } 37678300f225SRoi Dayan 37688300f225SRoi Dayan list_add(&attr->list, &flow->attrs); 37698300f225SRoi Dayan } 37708300f225SRoi Dayan } 37718300f225SRoi Dayan 37728300f225SRoi Dayan kfree(flow_action_reorder.entries); 37738300f225SRoi Dayan 37748300f225SRoi Dayan err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); 37758300f225SRoi Dayan if (err) 37768300f225SRoi Dayan goto out_free_post_acts; 37778300f225SRoi Dayan 
37788300f225SRoi Dayan err = alloc_flow_post_acts(flow, extack); 37798300f225SRoi Dayan if (err) 37808300f225SRoi Dayan goto out_free_post_acts; 37818300f225SRoi Dayan 37828300f225SRoi Dayan return 0; 37838300f225SRoi Dayan 37848300f225SRoi Dayan out_free: 37858300f225SRoi Dayan kfree(flow_action_reorder.entries); 37868300f225SRoi Dayan out_free_post_acts: 37878300f225SRoi Dayan free_flow_post_acts(flow); 37888300f225SRoi Dayan 37898300f225SRoi Dayan return err; 37908300f225SRoi Dayan } 37918300f225SRoi Dayan 3792d9581e2fSRoi Dayan static int 3793df990477SRoi Dayan flow_action_supported(struct flow_action *flow_action, 3794df990477SRoi Dayan struct netlink_ext_ack *extack) 3795df990477SRoi Dayan { 3796df990477SRoi Dayan if (!flow_action_has_entries(flow_action)) { 3797df990477SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); 3798df990477SRoi Dayan return -EINVAL; 3799df990477SRoi Dayan } 3800df990477SRoi Dayan 3801df990477SRoi Dayan if (!flow_action_hw_stats_check(flow_action, extack, 3802df990477SRoi Dayan FLOW_ACTION_HW_STATS_DELAYED_BIT)) { 3803df990477SRoi Dayan NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); 3804df990477SRoi Dayan return -EOPNOTSUPP; 3805df990477SRoi Dayan } 3806df990477SRoi Dayan 3807df990477SRoi Dayan return 0; 3808df990477SRoi Dayan } 3809df990477SRoi Dayan 3810df990477SRoi Dayan static int 3811d9581e2fSRoi Dayan parse_tc_nic_actions(struct mlx5e_priv *priv, 381273867881SPablo Neira Ayuso struct flow_action *flow_action, 3813e98bedf5SEli Britstein struct mlx5e_tc_flow *flow, 3814e98bedf5SEli Britstein struct netlink_ext_ack *extack) 3815e3a2b7edSAmir Vadai { 3816fad54790SRoi Dayan struct mlx5e_tc_act_parse_state *parse_state; 3817c6cfe113SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 3818c620b772SAriel Levkovich struct mlx5_flow_attr *attr = flow->attr; 38198333d53eSRoi Dayan int err; 3820e3a2b7edSAmir Vadai 3821df990477SRoi Dayan err = flow_action_supported(flow_action, 
extack); 3822df990477SRoi Dayan if (err) 3823df990477SRoi Dayan return err; 3824319a1d19SJiri Pirko 3825fad54790SRoi Dayan attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 3826c6cfe113SRoi Dayan parse_attr = attr->parse_attr; 3827fad54790SRoi Dayan parse_state = &parse_attr->parse_state; 3828fad54790SRoi Dayan mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); 3829758bc134SRoi Dayan parse_state->ct_priv = get_ct_priv(priv); 3830e3a2b7edSAmir Vadai 38318333d53eSRoi Dayan err = parse_tc_actions(parse_state, flow_action); 3832fad54790SRoi Dayan if (err) 3833fad54790SRoi Dayan return err; 3834e3a2b7edSAmir Vadai 383509bf9792SRoi Dayan err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack); 3836d9581e2fSRoi Dayan if (err) 3837d9581e2fSRoi Dayan return err; 3838d9581e2fSRoi Dayan 38390610f8dcSRoi Dayan if (!actions_match_supported(priv, flow_action, parse_state->actions, 38400610f8dcSRoi Dayan parse_attr, flow, extack)) 3841bdd66ac0SOr Gerlitz return -EOPNOTSUPP; 3842bdd66ac0SOr Gerlitz 3843e3a2b7edSAmir Vadai return 0; 3844e3a2b7edSAmir Vadai } 3845e3a2b7edSAmir Vadai 384632134847SMaor Dickman static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, 3847b1d90e6bSRabie Loulou struct net_device *peer_netdev) 3848b1d90e6bSRabie Loulou { 3849b1d90e6bSRabie Loulou struct mlx5e_priv *peer_priv; 3850b1d90e6bSRabie Loulou 3851b1d90e6bSRabie Loulou peer_priv = netdev_priv(peer_netdev); 3852b1d90e6bSRabie Loulou 3853b1d90e6bSRabie Loulou return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 385432134847SMaor Dickman mlx5e_eswitch_vf_rep(priv->netdev) && 385532134847SMaor Dickman mlx5e_eswitch_vf_rep(peer_netdev) && 3856ab3f3d5eSRoi Dayan mlx5e_same_hw_devs(priv, peer_priv)); 3857d34eb2fcSOr Gerlitz } 3858d34eb2fcSOr Gerlitz 385932134847SMaor Dickman static bool same_hw_reps(struct mlx5e_priv *priv, 386032134847SMaor Dickman struct net_device *peer_netdev) 386132134847SMaor Dickman { 386232134847SMaor Dickman struct mlx5e_priv *peer_priv; 
386332134847SMaor Dickman 386432134847SMaor Dickman peer_priv = netdev_priv(peer_netdev); 386532134847SMaor Dickman 386632134847SMaor Dickman return mlx5e_eswitch_rep(priv->netdev) && 386732134847SMaor Dickman mlx5e_eswitch_rep(peer_netdev) && 3868ab3f3d5eSRoi Dayan mlx5e_same_hw_devs(priv, peer_priv); 386932134847SMaor Dickman } 387032134847SMaor Dickman 387132134847SMaor Dickman static bool is_lag_dev(struct mlx5e_priv *priv, 387232134847SMaor Dickman struct net_device *peer_netdev) 387332134847SMaor Dickman { 387432134847SMaor Dickman return ((mlx5_lag_is_sriov(priv->mdev) || 387532134847SMaor Dickman mlx5_lag_is_multipath(priv->mdev)) && 387632134847SMaor Dickman same_hw_reps(priv, peer_netdev)); 387732134847SMaor Dickman } 387832134847SMaor Dickman 387994db3317SEli Cohen static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev) 388094db3317SEli Cohen { 3881d6c13d74SEli Cohen if (same_hw_reps(priv, out_dev) && 388294db3317SEli Cohen MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) && 388394db3317SEli Cohen MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up)) 388494db3317SEli Cohen return true; 388594db3317SEli Cohen 388694db3317SEli Cohen return false; 388794db3317SEli Cohen } 388894db3317SEli Cohen 3889f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 3890f6dc1264SPaul Blakey struct net_device *out_dev) 3891f6dc1264SPaul Blakey { 389232134847SMaor Dickman if (is_merged_eswitch_vfs(priv, out_dev)) 389332134847SMaor Dickman return true; 389432134847SMaor Dickman 389594db3317SEli Cohen if (is_multiport_eligible(priv, out_dev)) 389694db3317SEli Cohen return true; 389794db3317SEli Cohen 389832134847SMaor Dickman if (is_lag_dev(priv, out_dev)) 3899f6dc1264SPaul Blakey return true; 3900f6dc1264SPaul Blakey 3901f6dc1264SPaul Blakey return mlx5e_eswitch_rep(out_dev) && 390232134847SMaor Dickman same_port_devs(priv, netdev_priv(out_dev)); 3903f6dc1264SPaul Blakey } 3904f6dc1264SPaul Blakey 
	/* Set up forwarding of a flow to an internal (OVS-type) port: map the
	 * target ifindex to vport metadata via a header rewrite and record
	 * the destination on the eswitch attr.  Takes a reference on the
	 * int-port object; the reference is dropped on the error path.
	 */
390527484f71SAriel Levkovich int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
390627484f71SAriel Levkovich 				      struct mlx5_flow_attr *attr,
390727484f71SAriel Levkovich 				      int ifindex,
390827484f71SAriel Levkovich 				      enum mlx5e_tc_int_port_type type,
390927484f71SAriel Levkovich 				      u32 *action,
391027484f71SAriel Levkovich 				      int out_index)
391127484f71SAriel Levkovich {
391227484f71SAriel Levkovich 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
391327484f71SAriel Levkovich 	struct mlx5e_tc_int_port_priv *int_port_priv;
391427484f71SAriel Levkovich 	struct mlx5e_tc_flow_parse_attr *parse_attr;
391527484f71SAriel Levkovich 	struct mlx5e_tc_int_port *dest_int_port;
391627484f71SAriel Levkovich 	int err;
391727484f71SAriel Levkovich 
391827484f71SAriel Levkovich 	parse_attr = attr->parse_attr;
391927484f71SAriel Levkovich 	int_port_priv = mlx5e_get_int_port_priv(priv);
392027484f71SAriel Levkovich 
392127484f71SAriel Levkovich 	dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
392227484f71SAriel Levkovich 	if (IS_ERR(dest_int_port))
392327484f71SAriel Levkovich 		return PTR_ERR(dest_int_port);
392427484f71SAriel Levkovich 
392527484f71SAriel Levkovich 	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
392627484f71SAriel Levkovich 					MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
392727484f71SAriel Levkovich 					mlx5e_tc_int_port_get_metadata(dest_int_port));
392827484f71SAriel Levkovich 	if (err) {
392927484f71SAriel Levkovich 		mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
393027484f71SAriel Levkovich 		return err;
393127484f71SAriel Levkovich 	}
393227484f71SAriel Levkovich 
393327484f71SAriel Levkovich 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
393427484f71SAriel Levkovich 
393527484f71SAriel Levkovich 	esw_attr->dest_int_port = dest_int_port;
393627484f71SAriel Levkovich 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
393727484f71SAriel Levkovich 
393827484f71SAriel Levkovich 	/* Forward to root fdb for matching against the new source vport */
393927484f71SAriel Levkovich 	attr->dest_chain = 0;
394027484f71SAriel Levkovich 
394127484f71SAriel Levkovich 	return 0;
394227484f71SAriel Levkovich }
394327484f71SAriel Levkovich 
	/* Parse TC actions for an FDB (eswitch-offloaded) flow and validate
	 * the resulting action set.  Mirrors the NIC-flow action parsing,
	 * with the extra internal-port single-destination restriction.
	 */
39448333d53eSRoi Dayan static int
39458333d53eSRoi Dayan parse_tc_fdb_actions(struct mlx5e_priv *priv,
394673867881SPablo Neira Ayuso 		     struct flow_action *flow_action,
3947e98bedf5SEli Britstein 		     struct mlx5e_tc_flow *flow,
394870f8019eSRoi Dayan 		     struct netlink_ext_ack *extack)
3949a54e20b4SHadar Hen Zion {
3950fad54790SRoi Dayan 	struct mlx5e_tc_act_parse_state *parse_state;
3951c620b772SAriel Levkovich 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3952c620b772SAriel Levkovich 	struct mlx5_flow_attr *attr = flow->attr;
3953c620b772SAriel Levkovich 	struct mlx5_esw_flow_attr *esw_attr;
39548333d53eSRoi Dayan 	int err;
395503a9d11eSOr Gerlitz 
3956df990477SRoi Dayan 	err = flow_action_supported(flow_action, extack);
3957df990477SRoi Dayan 	if (err)
3958df990477SRoi Dayan 		return err;
3959319a1d19SJiri Pirko 
3960c620b772SAriel Levkovich 	esw_attr = attr->esw_attr;
3961c620b772SAriel Levkovich 	parse_attr = attr->parse_attr;
3962fad54790SRoi Dayan 	parse_state = &parse_attr->parse_state;
3963fad54790SRoi Dayan 	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3964758bc134SRoi Dayan 	parse_state->ct_priv = get_ct_priv(priv);
3965c620b772SAriel Levkovich 
39668333d53eSRoi Dayan 	err = parse_tc_actions(parse_state, flow_action);
3967fad54790SRoi Dayan 	if (err)
3968fad54790SRoi Dayan 		return err;
3969bdd66ac0SOr Gerlitz 
3970166f431eSAriel Levkovich 	/* Forward to/from internal port can only have 1 dest */
3971166f431eSAriel Levkovich 	if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
3972166f431eSAriel Levkovich 	    esw_attr->out_count > 1) {
397327484f71SAriel Levkovich 		NL_SET_ERR_MSG_MOD(extack,
3974166f431eSAriel Levkovich 				   "Rules with internal port can have only one destination");
397527484f71SAriel Levkovich 		return -EOPNOTSUPP;
397627484f71SAriel Levkovich 	}
397727484f71SAriel Levkovich 
397809bf9792SRoi Dayan 	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
3979c500c86bSPablo Neira Ayuso 	if (err)
3980c500c86bSPablo Neira Ayuso 		return err;
3981c500c86bSPablo Neira Ayuso 
39820610f8dcSRoi Dayan 	if (!actions_match_supported(priv, flow_action, parse_state->actions,
39830610f8dcSRoi Dayan 				     parse_attr, flow, extack))
3984bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3985bdd66ac0SOr Gerlitz 
398631c8eba5SOr Gerlitz 	return 0;
398703a9d11eSOr Gerlitz }
398803a9d11eSOr Gerlitz 
	/* Translate caller-visible MLX5_TC_FLAG() bits into internal
	 * MLX5E_TC_FLOW_FLAG_* bits.
	 */
3989226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
399060bd4af8SOr Gerlitz {
3991226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
399260bd4af8SOr Gerlitz 
3993226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
3994226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3995226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
3996226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
399760bd4af8SOr Gerlitz 
3998226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3999226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4000226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4001226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
400284179981SPaul Blakey 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
400384179981SPaul Blakey 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4004d9ee0491SOr Gerlitz 
400560bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
400660bd4af8SOr Gerlitz }
400760bd4af8SOr Gerlitz 
	/* Flows are hashed by the TC filter cookie. */
400805866c82SOr Gerlitz static const struct rhashtable_params tc_ht_params = {
400905866c82SOr Gerlitz 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
401005866c82SOr Gerlitz 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
401105866c82SOr Gerlitz 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
401205866c82SOr Gerlitz 	.automatic_shrinking = true,
401305866c82SOr Gerlitz };
401405866c82SOr Gerlitz 
	/* Return the flow hashtable for the given offload type: the rep's
	 * table for eswitch offloads, the NIC table otherwise.
	 */
4015226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4016226f2ca3SVlad Buslov 				    unsigned long flags)
401705866c82SOr Gerlitz {
4018d1a3138fSPaul Blakey 	struct mlx5e_rep_priv *rpriv;
4019655dc3d2SOr Gerlitz 
4020226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4021d1a3138fSPaul Blakey 		rpriv = priv->ppriv;
4022d1a3138fSPaul Blakey 		return &rpriv->tc_ht;
4023d9ee0491SOr Gerlitz 	} else /* NIC offload */
402405866c82SOr Gerlitz 		return &priv->fs.tc.ht;
402505866c82SOr Gerlitz }
402605866c82SOr Gerlitz 
	/* A mirror flow must be installed on the peer eswitch when the
	 * devices are devcom-paired and in SR-IOV or multipath LAG, and the
	 * flow either ingresses on a non-uplink rep or performs packet
	 * reformat (encap).
	 */
402704de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
402804de7ddaSRoi Dayan {
4029c620b772SAriel Levkovich 	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4030c620b772SAriel Levkovich 	struct mlx5_flow_attr *attr = flow->attr;
4031c620b772SAriel Levkovich 	bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4032226f2ca3SVlad Buslov 			      flow_flag_test(flow, INGRESS);
40331418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
40341418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4035c620b772SAriel Levkovich 	bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
40361418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
40371418ddd9SAviv Heller 
403810fbb1cdSRoi Dayan 	if (!esw_paired)
403910fbb1cdSRoi Dayan 		return false;
404010fbb1cdSRoi Dayan 
4041c620b772SAriel Levkovich 	if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4042c620b772SAriel Levkovich 	     mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
404310fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
404410fbb1cdSRoi Dayan 		return true;
404510fbb1cdSRoi Dayan 
404610fbb1cdSRoi Dayan 	return false;
404704de7ddaSRoi Dayan }
404804de7ddaSRoi Dayan 
	/* Allocate a flow attr with the namespace-specific extension
	 * (esw or nic attr) allocated inline after it.  Returns NULL on
	 * allocation failure.
	 */
4049c620b772SAriel Levkovich struct mlx5_flow_attr *
4050c620b772SAriel Levkovich mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4051c620b772SAriel Levkovich {
4052c620b772SAriel Levkovich 	u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4053c620b772SAriel Levkovich 			  sizeof(struct mlx5_esw_flow_attr) :
4054c620b772SAriel Levkovich 			  sizeof(struct mlx5_nic_flow_attr);
4055c620b772SAriel Levkovich 	struct mlx5_flow_attr *attr;
4056c620b772SAriel Levkovich 
40578300f225SRoi Dayan 	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
40588300f225SRoi Dayan 	if (!attr)
40598300f225SRoi Dayan 		return attr;
40608300f225SRoi Dayan 
40618300f225SRoi Dayan 	INIT_LIST_HEAD(&attr->list);
40628300f225SRoi Dayan 	return attr;
4063c620b772SAriel Levkovich }
4064c620b772SAriel Levkovich 
	/* Allocate and minimally initialize a flow object and its parse
	 * attr.  On success returns 0 and hands both back through the out
	 * parameters; on failure frees whatever was allocated and returns
	 * -ENOMEM.
	 */
4065a88780a9SRoi Dayan static int
4066a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4067226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
4068a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4069a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
4070e3a2b7edSAmir Vadai {
407117091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4072c620b772SAriel Levkovich 	struct mlx5_flow_attr *attr;
40733bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
4074ff7ea04aSGustavo A. R. Silva 	int err = -ENOMEM;
4075ff7ea04aSGustavo A. R. Silva 	int out_index;
4076776b12b6SOr Gerlitz 
4077c620b772SAriel Levkovich 	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
40781b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4079ff7ea04aSGustavo A. R. Silva 	if (!parse_attr || !flow)
4080ff7ea04aSGustavo A. R. Silva 		goto err_free;
4081c620b772SAriel Levkovich 
4082c620b772SAriel Levkovich 	flow->flags = flow_flags;
4083c620b772SAriel Levkovich 	flow->cookie = f->cookie;
4084c620b772SAriel Levkovich 	flow->priv = priv;
4085c620b772SAriel Levkovich 
4086e36db1eeSRoi Dayan 	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
4087ff7ea04aSGustavo A. R. Silva 	if (!attr)
4088ff7ea04aSGustavo A. R. Silva 		goto err_free;
4089ff7ea04aSGustavo A. R. Silva 
4090c620b772SAriel Levkovich 	flow->attr = attr;
4091e3a2b7edSAmir Vadai 
40925a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
40935a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
40945a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
409514e6b038SEli Cohen 	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
40968300f225SRoi Dayan 	INIT_LIST_HEAD(&flow->attrs);
40975a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
409895435ad7SVlad Buslov 	init_completion(&flow->init_done);
4099362980eaSVlad Buslov 	init_completion(&flow->del_hw_done);
4100e3a2b7edSAmir Vadai 
4101a88780a9SRoi Dayan 	*__flow = flow;
4102a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
4103a88780a9SRoi Dayan 
4104a88780a9SRoi Dayan 	return 0;
4105a88780a9SRoi Dayan 
4106a88780a9SRoi Dayan err_free:
4107a88780a9SRoi Dayan 	kfree(flow);
4108a88780a9SRoi Dayan 	kvfree(parse_attr);
4109a88780a9SRoi Dayan 	return err;
4110adb4c123SOr Gerlitz }
4111adb4c123SOr Gerlitz 
	/* Copy chain/prio and the parse attr from the TC classifier offload
	 * request into the flow attr.
	 */
4112988ab9c7STonghao Zhang static void
4113c7569097SAriel Levkovich mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4114c7569097SAriel Levkovich 		     struct mlx5e_tc_flow_parse_attr *parse_attr,
4115c7569097SAriel Levkovich 		     struct flow_cls_offload *f)
4116c7569097SAriel Levkovich {
4117c7569097SAriel Levkovich 	attr->parse_attr = parse_attr;
4118c7569097SAriel Levkovich 	attr->chain = f->common.chain_index;
4119c7569097SAriel Levkovich 	attr->prio = f->common.prio;
4120c7569097SAriel Levkovich }
4121c7569097SAriel Levkovich 
4122c7569097SAriel Levkovich static void
4123c620b772SAriel Levkovich mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4124988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
4125988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
4126f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
4127988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
4128988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
4129988ab9c7STonghao Zhang {
4130988ab9c7STonghao Zhang 
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4131c620b772SAriel Levkovich 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4132988ab9c7STonghao Zhang 
4133c7569097SAriel Levkovich 	mlx5e_flow_attr_init(attr, parse_attr, f);
4134988ab9c7STonghao Zhang 
4135988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
4136988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
4137988ab9c7STonghao Zhang 
	/* Counters live on the ingress device only when the firmware reports
	 * eswitch counter affinity; otherwise they live on this priv's mdev.
	 */
4138988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4139988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
4140988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
4141988ab9c7STonghao Zhang 	else
4142988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
4143988ab9c7STonghao Zhang }
4144988ab9c7STonghao Zhang 
	/* Core FDB flow-add path: allocate the flow, parse the flower match,
	 * the CT match and the actions, then install the rule in hardware.
	 * A flow whose install fails with -ENETUNREACH under multipath is
	 * queued as "unready" instead of failing the add.
	 */
414571129676SJason Gunthorpe static struct mlx5e_tc_flow *
414604de7ddaSRoi Dayan __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4147f9e30088SPablo Neira Ayuso 		     struct flow_cls_offload *f,
4148226f2ca3SVlad Buslov 		     unsigned long flow_flags,
4149d11afc26SOz Shlomo 		     struct net_device *filter_dev,
415004de7ddaSRoi Dayan 		     struct mlx5_eswitch_rep *in_rep,
415171129676SJason Gunthorpe 		     struct mlx5_core_dev *in_mdev)
4152a88780a9SRoi Dayan {
4153f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
415494db3317SEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4155a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4156a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4157a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4158a88780a9SRoi Dayan 	int attr_size, err;
4159a88780a9SRoi Dayan 
4160226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4161a88780a9SRoi Dayan 	attr_size = sizeof(struct mlx5_esw_flow_attr);
4162a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4163a88780a9SRoi Dayan 			       &parse_attr, &flow);
4164a88780a9SRoi Dayan 	if (err)
4165a88780a9SRoi Dayan 		goto out;
4166988ab9c7STonghao Zhang 
4167d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
4168c620b772SAriel Levkovich 	mlx5e_flow_esw_attr_init(flow->attr,
4169988ab9c7STonghao Zhang 				 priv, parse_attr,
4170988ab9c7STonghao Zhang 				 f, in_rep, in_mdev);
4171988ab9c7STonghao Zhang 
417254c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
417354c177caSOz Shlomo 			       f, filter_dev);
4174d11afc26SOz Shlomo 	if (err)
4175d11afc26SOz Shlomo 		goto err_free;
4176a88780a9SRoi Dayan 
41777e36feebSPaul Blakey 	/* actions validation depends on parsing the ct matches first */
4178aedd133dSAriel Levkovich 	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4179c620b772SAriel Levkovich 				   &flow->attr->ct_attr, extack);
4180a88780a9SRoi Dayan 	if (err)
4181a88780a9SRoi Dayan 		goto err_free;
4182a88780a9SRoi Dayan 
4183d4bb0531SRoi Dayan 	/* always set IP version for indirect table handling */
4184d4bb0531SRoi Dayan 	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
4185d4bb0531SRoi Dayan 
418670f8019eSRoi Dayan 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
41874c3844d9SPaul Blakey 	if (err)
41884c3844d9SPaul Blakey 		goto err_free;
41894c3844d9SPaul Blakey 
	/* Multiport-eswitch flows need a LAG MPESW rule for their lifetime. */
419094db3317SEli Cohen 	if (flow->attr->lag.count) {
419194db3317SEli Cohen 		err = mlx5_lag_add_mpesw_rule(esw->dev);
419294db3317SEli Cohen 		if (err)
419394db3317SEli Cohen 			goto err_free;
419494db3317SEli Cohen 	}
419594db3317SEli Cohen 
41967040632dSTonghao Zhang 	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
419795435ad7SVlad Buslov 	complete_all(&flow->init_done);
4198ef06c9eeSRoi Dayan 	if (err) {
4199ef06c9eeSRoi Dayan 		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
420094db3317SEli Cohen 			goto err_lag;
42015c40348cSOr Gerlitz 
4202b4a23329SRoi Dayan 		add_unready_flow(flow);
4203ef06c9eeSRoi Dayan 	}
4204ef06c9eeSRoi Dayan 
420571129676SJason Gunthorpe 	return flow;
4206e3a2b7edSAmir Vadai 
420794db3317SEli Cohen err_lag:
420894db3317SEli Cohen 	if (flow->attr->lag.count)
420994db3317SEli Cohen 		mlx5_lag_del_mpesw_rule(esw->dev);
4210e3a2b7edSAmir Vadai err_free:
42115a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4212a88780a9SRoi Dayan out:
421371129676SJason Gunthorpe 	return ERR_PTR(err);
4214a88780a9SRoi Dayan }
4215a88780a9SRoi Dayan 
	/* Install the mirror of an offloaded flow on the devcom-paired peer
	 * eswitch, choosing in_mdev so that counters are read from the device
	 * where packets actually ingress.
	 */
4216f9e30088SPablo Neira Ayuso static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
421795dc1902SRoi Dayan 				      struct mlx5e_tc_flow *flow,
4218226f2ca3SVlad Buslov 				      unsigned long flow_flags)
421904de7ddaSRoi Dayan {
422004de7ddaSRoi Dayan 	struct mlx5e_priv *priv = flow->priv, *peer_priv;
422104de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4222c620b772SAriel Levkovich 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
422304de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
422404de7ddaSRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
422504de7ddaSRoi Dayan 	struct mlx5e_rep_priv *peer_urpriv;
422604de7ddaSRoi Dayan 	struct mlx5e_tc_flow *peer_flow;
422704de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev;
422804de7ddaSRoi Dayan 	int err = 0;
422904de7ddaSRoi Dayan 
423004de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
423104de7ddaSRoi Dayan 	if (!peer_esw)
423204de7ddaSRoi Dayan 		return -ENODEV;
423304de7ddaSRoi Dayan 
423404de7ddaSRoi Dayan 	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
423504de7ddaSRoi Dayan 	peer_priv = netdev_priv(peer_urpriv->netdev);
423604de7ddaSRoi Dayan 
423704de7ddaSRoi Dayan 	/* in_mdev is assigned of which the packet originated from.
423804de7ddaSRoi Dayan 	 * So packets redirected to uplink use the same mdev of the
423904de7ddaSRoi Dayan 	 * original flow and packets redirected from uplink use the
424004de7ddaSRoi Dayan 	 * peer mdev.
424104de7ddaSRoi Dayan 	 */
4242c620b772SAriel Levkovich 	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
424304de7ddaSRoi Dayan 		in_mdev = peer_priv->mdev;
424404de7ddaSRoi Dayan 	else
424504de7ddaSRoi Dayan 		in_mdev = priv->mdev;
424604de7ddaSRoi Dayan 
4247c620b772SAriel Levkovich 	parse_attr = flow->attr->parse_attr;
424895dc1902SRoi Dayan 	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
424904de7ddaSRoi Dayan 					 parse_attr->filter_dev,
4250c620b772SAriel Levkovich 					 attr->in_rep, in_mdev);
425171129676SJason Gunthorpe 	if (IS_ERR(peer_flow)) {
425271129676SJason Gunthorpe 		err = PTR_ERR(peer_flow);
425304de7ddaSRoi Dayan 		goto out;
425471129676SJason Gunthorpe 	}
425504de7ddaSRoi Dayan 
	/* Link both flows and publish on the eswitch peer-flow list. */
425604de7ddaSRoi Dayan 	flow->peer_flow = peer_flow;
4257226f2ca3SVlad Buslov 	flow_flag_set(flow, DUP);
425804de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
425904de7ddaSRoi Dayan 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
426004de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
426104de7ddaSRoi Dayan 
426204de7ddaSRoi Dayan out:
426304de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
426404de7ddaSRoi Dayan 	return err;
426504de7ddaSRoi Dayan }
426604de7ddaSRoi Dayan 
	/* Add an FDB flow on the local eswitch and, when required, its
	 * mirror on the devcom peer.  The local flow is torn down if the
	 * peer add fails.
	 */
426704de7ddaSRoi Dayan static int
426804de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4269f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4270226f2ca3SVlad Buslov 		   unsigned long flow_flags,
427104de7ddaSRoi Dayan 		   struct net_device *filter_dev,
427204de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
427304de7ddaSRoi Dayan {
427404de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
427504de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
427604de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
427704de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
427804de7ddaSRoi Dayan 	int err;
427904de7ddaSRoi Dayan 
428071129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
428171129676SJason Gunthorpe 				    in_mdev);
428271129676SJason Gunthorpe 	if (IS_ERR(flow))
428371129676SJason Gunthorpe 		return PTR_ERR(flow);
428404de7ddaSRoi Dayan 
428504de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
428695dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
428704de7ddaSRoi Dayan 		if (err) {
428804de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
428904de7ddaSRoi Dayan 			goto out;
429004de7ddaSRoi Dayan 		}
429104de7ddaSRoi Dayan 	}
429204de7ddaSRoi Dayan 
429304de7ddaSRoi Dayan 	*__flow = flow;
429404de7ddaSRoi Dayan 
429504de7ddaSRoi Dayan 	return 0;
429604de7ddaSRoi Dayan 
429704de7ddaSRoi Dayan out:
429804de7ddaSRoi Dayan 	return err;
429904de7ddaSRoi Dayan }
430004de7ddaSRoi Dayan 
	/* Add a NIC-table (non-eswitch) flow: parse the flower match, the CT
	 * match and the actions, then install the rule.  Chains beyond 0 are
	 * only accepted when the device supports ignore_flow_level.
	 */
4301a88780a9SRoi Dayan static int
4302a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4303f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4304226f2ca3SVlad Buslov 		   unsigned long flow_flags,
4305d11afc26SOz Shlomo 		   struct net_device *filter_dev,
4306a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
4307a88780a9SRoi Dayan {
4308f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4309a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4310a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4311a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4312a88780a9SRoi Dayan 	int attr_size, err;
4313a88780a9SRoi Dayan 
4314c7569097SAriel Levkovich 	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4315bf07aa73SPaul Blakey 		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4316bf07aa73SPaul Blakey 			return -EOPNOTSUPP;
4317c7569097SAriel Levkovich 	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4318c7569097SAriel Levkovich 		return -EOPNOTSUPP;
4319c7569097SAriel Levkovich 	}
4320bf07aa73SPaul Blakey 
4321226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4322a88780a9SRoi Dayan 	attr_size = sizeof(struct mlx5_nic_flow_attr);
4323a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4324a88780a9SRoi Dayan 			       &parse_attr, &flow);
4325a88780a9SRoi Dayan 	if (err)
4326a88780a9SRoi Dayan 		goto out;
4327a88780a9SRoi Dayan 
4328d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
4329c7569097SAriel Levkovich 	mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4330c7569097SAriel Levkovich 
433154c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
433254c177caSOz Shlomo 			       f, filter_dev);
4333d11afc26SOz Shlomo 	if (err)
4334d11afc26SOz Shlomo 		goto err_free;
4335d11afc26SOz Shlomo 
4336aedd133dSAriel Levkovich 	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4337aedd133dSAriel Levkovich 				   &flow->attr->ct_attr, extack);
4338aedd133dSAriel Levkovich 	if (err)
4339aedd133dSAriel Levkovich 		goto err_free;
4340aedd133dSAriel Levkovich 
4341c6cfe113SRoi Dayan 	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
4342a88780a9SRoi Dayan 	if (err)
4343a88780a9SRoi Dayan 		goto err_free;
4344a88780a9SRoi Dayan 
4345c6cfe113SRoi Dayan 	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
4346a88780a9SRoi Dayan 	if (err)
4347a88780a9SRoi Dayan 		goto err_free;
4348a88780a9SRoi Dayan 
4349226f2ca3SVlad Buslov 	flow_flag_set(flow, OFFLOADED);
4350a88780a9SRoi Dayan 	*__flow = flow;
4351a88780a9SRoi Dayan 
4352a88780a9SRoi Dayan 	return 0;
4353a88780a9SRoi Dayan 
4354a88780a9SRoi Dayan err_free:
43558914add2SVlad Buslov 	flow_flag_set(flow, FAILED);
43562c0e5cf5SPaul Blakey 	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
43575a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4358a88780a9SRoi Dayan out:
4359a88780a9SRoi Dayan 	return err;
4360a88780a9SRoi Dayan }
4361a88780a9SRoi Dayan 
	/* Dispatch a flow add to the FDB or NIC path based on eswitch mode. */
4362a88780a9SRoi Dayan static int
4363a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4364f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
4365226f2ca3SVlad Buslov 		  unsigned long flags,
4366d11afc26SOz Shlomo 		  struct net_device *filter_dev,
4367a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
4368a88780a9SRoi Dayan {
4369a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4370226f2ca3SVlad Buslov 	unsigned long flow_flags;
4371a88780a9SRoi Dayan 	int err;
4372a88780a9SRoi Dayan 
4373a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
4374a88780a9SRoi Dayan 
4375bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4376bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4377bf07aa73SPaul Blakey 
4378f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4379d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4380d11afc26SOz Shlomo 					 filter_dev, flow);
4381a88780a9SRoi Dayan 	else
4382d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4383d11afc26SOz Shlomo 					 filter_dev, flow);
4384a88780a9SRoi Dayan 
4385a88780a9SRoi Dayan 	return err;
4386a88780a9SRoi Dayan }
4387a88780a9SRoi Dayan 
4388553f9328SVu Pham static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4389553f9328SVu Pham 					   struct mlx5e_rep_priv *rpriv)
4390553f9328SVu Pham {
4391553f9328SVu Pham 	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
43922fb15e72SVlad Buslov 	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
43932fb15e72SVlad Buslov 	 * function is called from NIC mode.
4394553f9328SVu Pham 	 */
43952fb15e72SVlad Buslov 	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
4396553f9328SVu Pham }
4397553f9328SVu Pham 
	/* Entry point for adding a TC flower filter.  Takes mlx5_esw_hold()
	 * and mlx5_esw_get() references before touching the flow table; the
	 * rest of the function pairs them on success and failure paths.
	 */
439871d82d2aSOz Shlomo int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4399226f2ca3SVlad Buslov 			   struct flow_cls_offload *f, unsigned long flags)
4400a88780a9SRoi Dayan {
4401a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4402d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4403553f9328SVu Pham 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4404a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4405a88780a9SRoi Dayan 	int err = 0;
4406a88780a9SRoi Dayan 
44077dc84de9SRoi Dayan 	if (!mlx5_esw_hold(priv->mdev))
44087dc84de9SRoi Dayan 		return -EAGAIN;
44097dc84de9SRoi Dayan 
44107dc84de9SRoi Dayan 	mlx5_esw_get(priv->mdev);
44117dc84de9SRoi Dayan 
	/* Duplicate-cookie check under RCU before creating a new flow. */
4412c5d326b2SVlad Buslov 	rcu_read_lock();
4413c5d326b2SVlad Buslov 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4414a88780a9SRoi Dayan 	if (flow) {
4415553f9328SVu Pham 		/* Same flow rule offloaded to non-uplink representor sharing tc block,
4416553f9328SVu Pham 		 * just return 0.
4417553f9328SVu Pham 		 */
4418553f9328SVu Pham 		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4419c1aea9e1SVlad Buslov 			goto rcu_unlock;
4420553f9328SVu Pham 
4421a88780a9SRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
4422a88780a9SRoi Dayan 				   "flow cookie already exists, ignoring");
4423a88780a9SRoi Dayan 		netdev_warn_once(priv->netdev,
4424a88780a9SRoi Dayan 				 "flow cookie %lx already exists, ignoring\n",
4425a88780a9SRoi Dayan 				 f->cookie);
44260e1c1a2fSVlad Buslov 		err = -EEXIST;
4427c1aea9e1SVlad Buslov 		goto rcu_unlock;
4428a88780a9SRoi Dayan 	}
4429c1aea9e1SVlad Buslov rcu_unlock:
4430c1aea9e1SVlad Buslov 	rcu_read_unlock();
4431c1aea9e1SVlad Buslov 	if (flow)
4432c1aea9e1SVlad Buslov 		goto out;
4433a88780a9SRoi Dayan 
44347a978759SDmytro Linkin 	trace_mlx5e_configure_flower(f);
4435d11afc26SOz Shlomo 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4436a88780a9SRoi Dayan 	if (err)
4437a88780a9SRoi Dayan 		goto out;
4438a88780a9SRoi Dayan 
4439553f9328SVu Pham 	/* Flow rule offloaded to non-uplink representor sharing tc block,
4440553f9328SVu Pham 	 * set the flow's owner dev.
4441553f9328SVu Pham 	 */
4442553f9328SVu Pham 	if (is_flow_rule_duplicate_allowed(dev, rpriv))
4443553f9328SVu Pham 		flow->orig_dev = dev;
4444553f9328SVu Pham 
4445c5d326b2SVlad Buslov 	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4446a88780a9SRoi Dayan 	if (err)
4447a88780a9SRoi Dayan 		goto err_free;
4448a88780a9SRoi Dayan 
	/* Success: mlx5_esw_get() stays held for the flow; only the hold is
	 * released here.  Error paths below release both.
	 */
44497dc84de9SRoi Dayan 	mlx5_esw_release(priv->mdev);
4450a88780a9SRoi Dayan 	return 0;
4451a88780a9SRoi Dayan 
4452a88780a9SRoi Dayan err_free:
44535a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4454a88780a9SRoi Dayan out:
44557dc84de9SRoi Dayan 	mlx5_esw_put(priv->mdev);
44567dc84de9SRoi Dayan 	mlx5_esw_release(priv->mdev);
4457e3a2b7edSAmir Vadai 	return err;
4458e3a2b7edSAmir Vadai }
4459e3a2b7edSAmir Vadai 
	/* Check that the flow's ingress/egress flags match the caller's. */
44608f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
44618f8ae895SOr Gerlitz {
4462226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4463226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
44648f8ae895SOr Gerlitz 
4465226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4466226f2ca3SVlad Buslov 	       flow_flag_test(flow, EGRESS) == dir_egress;
44678f8ae895SOr Gerlitz }
44688f8ae895SOr Gerlitz 
	/* Entry point for deleting a TC flower filter.  The DELETED flag,
	 * set test-and-set under the RCU read lock, guards against a
	 * concurrent double delete of the same flow.
	 */
446971d82d2aSOz Shlomo int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4470226f2ca3SVlad Buslov 			struct flow_cls_offload *f, unsigned long flags)
4471e3a2b7edSAmir Vadai {
4472d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4473e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
4474c5d326b2SVlad Buslov 	int err;
4475e3a2b7edSAmir Vadai 
4476c5d326b2SVlad Buslov 	rcu_read_lock();
4477ab818362STaehee Yoo 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4478c5d326b2SVlad Buslov 	if (!flow || !same_flow_direction(flow, flags)) {
4479c5d326b2SVlad Buslov 		err = -EINVAL;
4480c5d326b2SVlad Buslov 		goto errout;
4481c5d326b2SVlad Buslov 	}
4482e3a2b7edSAmir Vadai 
4483c5d326b2SVlad Buslov 	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
4484c5d326b2SVlad Buslov 	 * set.
4485c5d326b2SVlad Buslov 	 */
4486c5d326b2SVlad Buslov 	if (flow_flag_test_and_set(flow, DELETED)) {
4487c5d326b2SVlad Buslov 		err = -EINVAL;
4488c5d326b2SVlad Buslov 		goto errout;
4489c5d326b2SVlad Buslov 	}
449005866c82SOr Gerlitz 	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4491c5d326b2SVlad Buslov 	rcu_read_unlock();
4492e3a2b7edSAmir Vadai 
44937a978759SDmytro Linkin 	trace_mlx5e_delete_flower(f);
44945a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4495e3a2b7edSAmir Vadai 
44967dc84de9SRoi Dayan 	mlx5_esw_put(priv->mdev);
4497e3a2b7edSAmir Vadai 	return 0;
4498c5d326b2SVlad Buslov 
4499c5d326b2SVlad Buslov errout:
4500c5d326b2SVlad Buslov 	rcu_read_unlock();
4501c5d326b2SVlad Buslov 	return err;
4502e3a2b7edSAmir Vadai }
4503e3a2b7edSAmir Vadai 
	/* Stats query for a TC flower filter: sum the hardware counters of
	 * the flow and, when a devcom peer flow exists and is offloaded, of
	 * its peer, then report them via flow_stats_update().
	 */
450471d82d2aSOz Shlomo int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4505226f2ca3SVlad Buslov 		       struct flow_cls_offload *f, unsigned long flags)
4506aad7e08dSAmir Vadai {
450704de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4508d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
450904de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
4510aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
4511aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
4512316d5f72SRoi Dayan 	u64 lastuse = 0;
4513316d5f72SRoi Dayan 	u64 packets = 0;
4514316d5f72SRoi Dayan 	u64 bytes = 0;
45155a7e5bcbSVlad Buslov 	int err = 0;
4516aad7e08dSAmir Vadai 
	/* Take a flow reference under RCU so it can't vanish while queried. */
4517c5d326b2SVlad Buslov 	rcu_read_lock();
4518c5d326b2SVlad Buslov 	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
45195a7e5bcbSVlad Buslov 						tc_ht_params));
4520c5d326b2SVlad Buslov 	rcu_read_unlock();
45215a7e5bcbSVlad Buslov 	if (IS_ERR(flow))
45225a7e5bcbSVlad Buslov 		return PTR_ERR(flow);
45235a7e5bcbSVlad Buslov 
45245a7e5bcbSVlad Buslov 	if (!same_flow_direction(flow, flags)) {
45255a7e5bcbSVlad Buslov 		err = -EINVAL;
45265a7e5bcbSVlad Buslov 		goto errout;
45275a7e5bcbSVlad Buslov 	}
4528aad7e08dSAmir Vadai 
45294c3844d9SPaul Blakey 	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4530b8aee822SMark Bloch 		counter = mlx5e_tc_get_counter(flow);
4531aad7e08dSAmir Vadai 		if (!counter)
45325a7e5bcbSVlad Buslov 			goto errout;
4533aad7e08dSAmir Vadai 
4534aad7e08dSAmir Vadai 		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4535316d5f72SRoi Dayan 	}
4536aad7e08dSAmir Vadai 
4537316d5f72SRoi Dayan 	/* Under multipath it's possible for one rule to be currently
4538316d5f72SRoi Dayan 	 * un-offloaded while the other rule is offloaded.
4539316d5f72SRoi Dayan 	 */
454004de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
454104de7ddaSRoi Dayan 	if (!peer_esw)
454204de7ddaSRoi Dayan 		goto out;
454304de7ddaSRoi Dayan 
4544226f2ca3SVlad Buslov 	if (flow_flag_test(flow, DUP) &&
4545226f2ca3SVlad Buslov 	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
454604de7ddaSRoi Dayan 		u64 bytes2;
454704de7ddaSRoi Dayan 		u64 packets2;
454804de7ddaSRoi Dayan 		u64 lastuse2;
454904de7ddaSRoi Dayan 
455004de7ddaSRoi Dayan 		counter = mlx5e_tc_get_counter(flow->peer_flow);
4551316d5f72SRoi Dayan 		if (!counter)
4552316d5f72SRoi Dayan 			goto no_peer_counter;
455304de7ddaSRoi Dayan 		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
455404de7ddaSRoi Dayan 
455504de7ddaSRoi Dayan 		bytes += bytes2;
455604de7ddaSRoi Dayan 		packets += packets2;
455704de7ddaSRoi Dayan 		lastuse = max_t(u64, lastuse, lastuse2);
455804de7ddaSRoi Dayan 	}
455904de7ddaSRoi Dayan 
4560316d5f72SRoi Dayan no_peer_counter:
456104de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
456204de7ddaSRoi Dayan out:
45634b61d3e8SPo Liu 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
456493a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
45657a978759SDmytro Linkin 	trace_mlx5e_stats_flower(f);
45665a7e5bcbSVlad Buslov errout:
45675a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
45685a7e5bcbSVlad Buslov 	return
err; 4569aad7e08dSAmir Vadai } 4570aad7e08dSAmir Vadai 45711fe3e316SParav Pandit static int apply_police_params(struct mlx5e_priv *priv, u64 rate, 4572fcb64c0fSEli Cohen struct netlink_ext_ack *extack) 4573fcb64c0fSEli Cohen { 4574fcb64c0fSEli Cohen struct mlx5e_rep_priv *rpriv = priv->ppriv; 4575fcb64c0fSEli Cohen struct mlx5_eswitch *esw; 45761fe3e316SParav Pandit u32 rate_mbps = 0; 4577fcb64c0fSEli Cohen u16 vport_num; 4578fcb64c0fSEli Cohen int err; 4579fcb64c0fSEli Cohen 4580e401a184SEli Cohen vport_num = rpriv->rep->vport; 4581e401a184SEli Cohen if (vport_num >= MLX5_VPORT_ECPF) { 4582e401a184SEli Cohen NL_SET_ERR_MSG_MOD(extack, 4583e401a184SEli Cohen "Ingress rate limit is supported only for Eswitch ports connected to VFs"); 4584e401a184SEli Cohen return -EOPNOTSUPP; 4585e401a184SEli Cohen } 4586e401a184SEli Cohen 4587fcb64c0fSEli Cohen esw = priv->mdev->priv.eswitch; 4588fcb64c0fSEli Cohen /* rate is given in bytes/sec. 4589fcb64c0fSEli Cohen * First convert to bits/sec and then round to the nearest mbit/secs. 4590fcb64c0fSEli Cohen * mbit means million bits. 4591fcb64c0fSEli Cohen * Moreover, if rate is non zero we choose to configure to a minimum of 4592fcb64c0fSEli Cohen * 1 mbit/sec. 
4593fcb64c0fSEli Cohen */ 45941fe3e316SParav Pandit if (rate) { 45951fe3e316SParav Pandit rate = (rate * BITS_PER_BYTE) + 500000; 45968b90d897SParav Pandit do_div(rate, 1000000); 45978b90d897SParav Pandit rate_mbps = max_t(u32, rate, 1); 45981fe3e316SParav Pandit } 45991fe3e316SParav Pandit 46002d116e3eSDmytro Linkin err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps); 4601fcb64c0fSEli Cohen if (err) 4602fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); 4603fcb64c0fSEli Cohen 4604fcb64c0fSEli Cohen return err; 4605fcb64c0fSEli Cohen } 4606fcb64c0fSEli Cohen 4607a8d52b02SJianbo Liu int mlx5e_policer_validate(const struct flow_action *action, 4608d97b4b10SJianbo Liu const struct flow_action_entry *act, 4609d97b4b10SJianbo Liu struct netlink_ext_ack *extack) 4610d97b4b10SJianbo Liu { 4611d97b4b10SJianbo Liu if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 4612d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack, 4613d97b4b10SJianbo Liu "Offload not supported when exceed action is not drop"); 4614d97b4b10SJianbo Liu return -EOPNOTSUPP; 4615d97b4b10SJianbo Liu } 4616d97b4b10SJianbo Liu 4617d97b4b10SJianbo Liu if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 4618d97b4b10SJianbo Liu !flow_action_is_last_entry(action, act)) { 4619d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack, 4620d97b4b10SJianbo Liu "Offload not supported when conform action is ok, but action is not last"); 4621d97b4b10SJianbo Liu return -EOPNOTSUPP; 4622d97b4b10SJianbo Liu } 4623d97b4b10SJianbo Liu 4624d97b4b10SJianbo Liu if (act->police.peakrate_bytes_ps || 4625d97b4b10SJianbo Liu act->police.avrate || act->police.overhead) { 4626d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack, 4627d97b4b10SJianbo Liu "Offload not supported when peakrate/avrate/overhead is configured"); 4628d97b4b10SJianbo Liu return -EOPNOTSUPP; 4629d97b4b10SJianbo Liu } 4630d97b4b10SJianbo Liu 4631d97b4b10SJianbo Liu if (act->police.rate_pkt_ps) { 4632d97b4b10SJianbo Liu 
NL_SET_ERR_MSG_MOD(extack, 4633d97b4b10SJianbo Liu "QoS offload not support packets per second"); 4634d97b4b10SJianbo Liu return -EOPNOTSUPP; 4635d97b4b10SJianbo Liu } 4636d97b4b10SJianbo Liu 4637d97b4b10SJianbo Liu return 0; 4638d97b4b10SJianbo Liu } 4639d97b4b10SJianbo Liu 4640fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, 4641fcb64c0fSEli Cohen struct flow_action *flow_action, 4642fcb64c0fSEli Cohen struct netlink_ext_ack *extack) 4643fcb64c0fSEli Cohen { 4644fcb64c0fSEli Cohen struct mlx5e_rep_priv *rpriv = priv->ppriv; 4645fcb64c0fSEli Cohen const struct flow_action_entry *act; 4646fcb64c0fSEli Cohen int err; 4647fcb64c0fSEli Cohen int i; 4648fcb64c0fSEli Cohen 4649fcb64c0fSEli Cohen if (!flow_action_has_entries(flow_action)) { 4650fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); 4651fcb64c0fSEli Cohen return -EINVAL; 4652fcb64c0fSEli Cohen } 4653fcb64c0fSEli Cohen 4654fcb64c0fSEli Cohen if (!flow_offload_has_one_action(flow_action)) { 4655fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action"); 4656fcb64c0fSEli Cohen return -EOPNOTSUPP; 4657fcb64c0fSEli Cohen } 4658fcb64c0fSEli Cohen 46590885ae1aSAbhiram R N if (!flow_action_basic_hw_stats_check(flow_action, extack)) { 46600885ae1aSAbhiram R N NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); 4661319a1d19SJiri Pirko return -EOPNOTSUPP; 46620885ae1aSAbhiram R N } 4663319a1d19SJiri Pirko 4664fcb64c0fSEli Cohen flow_action_for_each(i, act, flow_action) { 4665fcb64c0fSEli Cohen switch (act->id) { 4666fcb64c0fSEli Cohen case FLOW_ACTION_POLICE: 46674d1e07d8SVlad Buslov if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) { 46684d1e07d8SVlad Buslov NL_SET_ERR_MSG_MOD(extack, 46694d1e07d8SVlad Buslov "Offload not supported when conform action is not continue"); 46704d1e07d8SVlad Buslov return -EOPNOTSUPP; 46714d1e07d8SVlad Buslov } 46724d1e07d8SVlad Buslov 4673d97b4b10SJianbo Liu 
err = mlx5e_policer_validate(flow_action, act, extack); 4674d97b4b10SJianbo Liu if (err) 4675d97b4b10SJianbo Liu return err; 4676d97b4b10SJianbo Liu 4677fcb64c0fSEli Cohen err = apply_police_params(priv, act->police.rate_bytes_ps, extack); 4678fcb64c0fSEli Cohen if (err) 4679fcb64c0fSEli Cohen return err; 4680fcb64c0fSEli Cohen 4681fcb64c0fSEli Cohen rpriv->prev_vf_vport_stats = priv->stats.vf_vport; 4682fcb64c0fSEli Cohen break; 4683fcb64c0fSEli Cohen default: 4684fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); 4685fcb64c0fSEli Cohen return -EOPNOTSUPP; 4686fcb64c0fSEli Cohen } 4687fcb64c0fSEli Cohen } 4688fcb64c0fSEli Cohen 4689fcb64c0fSEli Cohen return 0; 4690fcb64c0fSEli Cohen } 4691fcb64c0fSEli Cohen 4692fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, 4693fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 4694fcb64c0fSEli Cohen { 4695fcb64c0fSEli Cohen struct netlink_ext_ack *extack = ma->common.extack; 4696fcb64c0fSEli Cohen 46977b83355fSEli Cohen if (ma->common.prio != 1) { 4698fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); 4699fcb64c0fSEli Cohen return -EINVAL; 4700fcb64c0fSEli Cohen } 4701fcb64c0fSEli Cohen 4702fcb64c0fSEli Cohen return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); 4703fcb64c0fSEli Cohen } 4704fcb64c0fSEli Cohen 4705fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, 4706fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 4707fcb64c0fSEli Cohen { 4708fcb64c0fSEli Cohen struct netlink_ext_ack *extack = ma->common.extack; 4709fcb64c0fSEli Cohen 4710fcb64c0fSEli Cohen return apply_police_params(priv, 0, extack); 4711fcb64c0fSEli Cohen } 4712fcb64c0fSEli Cohen 4713fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, 4714fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 4715fcb64c0fSEli Cohen { 4716fcb64c0fSEli Cohen struct mlx5e_rep_priv *rpriv = priv->ppriv; 
4717fcb64c0fSEli Cohen struct rtnl_link_stats64 cur_stats; 4718fcb64c0fSEli Cohen u64 dbytes; 4719fcb64c0fSEli Cohen u64 dpkts; 4720fcb64c0fSEli Cohen 4721fcb64c0fSEli Cohen cur_stats = priv->stats.vf_vport; 4722fcb64c0fSEli Cohen dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 4723fcb64c0fSEli Cohen dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 4724fcb64c0fSEli Cohen rpriv->prev_vf_vport_stats = cur_stats; 47254b61d3e8SPo Liu flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, 472693a129ebSJiri Pirko FLOW_ACTION_HW_STATS_DELAYED); 4727fcb64c0fSEli Cohen } 4728fcb64c0fSEli Cohen 47294d8fcf21SAlaa Hleihel static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, 47304d8fcf21SAlaa Hleihel struct mlx5e_priv *peer_priv) 47314d8fcf21SAlaa Hleihel { 47324d8fcf21SAlaa Hleihel struct mlx5_core_dev *peer_mdev = peer_priv->mdev; 4733db76ca24SVlad Buslov struct mlx5e_hairpin_entry *hpe, *tmp; 4734db76ca24SVlad Buslov LIST_HEAD(init_wait_list); 47354d8fcf21SAlaa Hleihel u16 peer_vhca_id; 47364d8fcf21SAlaa Hleihel int bkt; 47374d8fcf21SAlaa Hleihel 4738ab3f3d5eSRoi Dayan if (!mlx5e_same_hw_devs(priv, peer_priv)) 47394d8fcf21SAlaa Hleihel return; 47404d8fcf21SAlaa Hleihel 47414d8fcf21SAlaa Hleihel peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); 47424d8fcf21SAlaa Hleihel 4743b32accdaSVlad Buslov mutex_lock(&priv->fs.tc.hairpin_tbl_lock); 4744db76ca24SVlad Buslov hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) 4745db76ca24SVlad Buslov if (refcount_inc_not_zero(&hpe->refcnt)) 4746db76ca24SVlad Buslov list_add(&hpe->dead_peer_wait_list, &init_wait_list); 4747b32accdaSVlad Buslov mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 4748db76ca24SVlad Buslov 4749db76ca24SVlad Buslov list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { 4750db76ca24SVlad Buslov wait_for_completion(&hpe->res_ready); 4751db76ca24SVlad Buslov if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) 
4752a3e5fd93SDima Chumak mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair); 4753db76ca24SVlad Buslov 4754db76ca24SVlad Buslov mlx5e_hairpin_put(priv, hpe); 4755db76ca24SVlad Buslov } 47564d8fcf21SAlaa Hleihel } 47574d8fcf21SAlaa Hleihel 47584d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this, 47594d8fcf21SAlaa Hleihel unsigned long event, void *ptr) 47604d8fcf21SAlaa Hleihel { 47614d8fcf21SAlaa Hleihel struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 47624d8fcf21SAlaa Hleihel struct mlx5e_flow_steering *fs; 47634d8fcf21SAlaa Hleihel struct mlx5e_priv *peer_priv; 47644d8fcf21SAlaa Hleihel struct mlx5e_tc_table *tc; 47654d8fcf21SAlaa Hleihel struct mlx5e_priv *priv; 47664d8fcf21SAlaa Hleihel 47674d8fcf21SAlaa Hleihel if (ndev->netdev_ops != &mlx5e_netdev_ops || 47684d8fcf21SAlaa Hleihel event != NETDEV_UNREGISTER || 47694d8fcf21SAlaa Hleihel ndev->reg_state == NETREG_REGISTERED) 47704d8fcf21SAlaa Hleihel return NOTIFY_DONE; 47714d8fcf21SAlaa Hleihel 47724d8fcf21SAlaa Hleihel tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); 47734d8fcf21SAlaa Hleihel fs = container_of(tc, struct mlx5e_flow_steering, tc); 47744d8fcf21SAlaa Hleihel priv = container_of(fs, struct mlx5e_priv, fs); 47754d8fcf21SAlaa Hleihel peer_priv = netdev_priv(ndev); 47764d8fcf21SAlaa Hleihel if (priv == peer_priv || 47774d8fcf21SAlaa Hleihel !(priv->netdev->features & NETIF_F_HW_TC)) 47784d8fcf21SAlaa Hleihel return NOTIFY_DONE; 47794d8fcf21SAlaa Hleihel 47804d8fcf21SAlaa Hleihel mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv); 47814d8fcf21SAlaa Hleihel 47824d8fcf21SAlaa Hleihel return NOTIFY_DONE; 47834d8fcf21SAlaa Hleihel } 47844d8fcf21SAlaa Hleihel 47856a064674SAriel Levkovich static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev) 47866a064674SAriel Levkovich { 47876a064674SAriel Levkovich int tc_grp_size, tc_tbl_size; 47886a064674SAriel Levkovich u32 max_flow_counter; 47896a064674SAriel Levkovich 47906a064674SAriel Levkovich 
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | 47916a064674SAriel Levkovich MLX5_CAP_GEN(dev, max_flow_counter_15_0); 47926a064674SAriel Levkovich 47936a064674SAriel Levkovich tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE); 47946a064674SAriel Levkovich 47956a064674SAriel Levkovich tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS, 47966a064674SAriel Levkovich BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size))); 47976a064674SAriel Levkovich 47986a064674SAriel Levkovich return tc_tbl_size; 47996a064674SAriel Levkovich } 48006a064674SAriel Levkovich 480166cb64e2SMaor Dickman static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv) 480266cb64e2SMaor Dickman { 480366cb64e2SMaor Dickman struct mlx5_flow_table **ft = &priv->fs.tc.miss_t; 480466cb64e2SMaor Dickman struct mlx5_flow_table_attr ft_attr = {}; 480566cb64e2SMaor Dickman struct mlx5_flow_namespace *ns; 480666cb64e2SMaor Dickman int err = 0; 480766cb64e2SMaor Dickman 480866cb64e2SMaor Dickman ft_attr.max_fte = 1; 480966cb64e2SMaor Dickman ft_attr.autogroup.max_num_groups = 1; 481066cb64e2SMaor Dickman ft_attr.level = MLX5E_TC_MISS_LEVEL; 481166cb64e2SMaor Dickman ft_attr.prio = 0; 481266cb64e2SMaor Dickman ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); 481366cb64e2SMaor Dickman 481466cb64e2SMaor Dickman *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); 481566cb64e2SMaor Dickman if (IS_ERR(*ft)) { 481666cb64e2SMaor Dickman err = PTR_ERR(*ft); 481766cb64e2SMaor Dickman netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err); 481866cb64e2SMaor Dickman } 481966cb64e2SMaor Dickman 482066cb64e2SMaor Dickman return err; 482166cb64e2SMaor Dickman } 482266cb64e2SMaor Dickman 482366cb64e2SMaor Dickman static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv) 482466cb64e2SMaor Dickman { 482566cb64e2SMaor Dickman mlx5_destroy_flow_table(priv->fs.tc.miss_t); 482666cb64e2SMaor Dickman } 
482766cb64e2SMaor Dickman 4828655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 4829e8f887acSAmir Vadai { 4830acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 48316a064674SAriel Levkovich struct mlx5_core_dev *dev = priv->mdev; 4832c9355682SChris Mi struct mapping_ctx *chains_mapping; 48336a064674SAriel Levkovich struct mlx5_chains_attr attr = {}; 48342198b932SRoi Dayan u64 mapping_id; 48354d8fcf21SAlaa Hleihel int err; 4836e8f887acSAmir Vadai 4837b2fdf3d0SPaul Blakey mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); 4838b6fac0b4SVlad Buslov mutex_init(&tc->t_lock); 4839b32accdaSVlad Buslov mutex_init(&tc->hairpin_tbl_lock); 48405c65c564SOr Gerlitz hash_init(tc->hairpin_tbl); 484111c9c548SOr Gerlitz 48424d8fcf21SAlaa Hleihel err = rhashtable_init(&tc->ht, &tc_ht_params); 48434d8fcf21SAlaa Hleihel if (err) 48444d8fcf21SAlaa Hleihel return err; 48454d8fcf21SAlaa Hleihel 48469ba33339SRoi Dayan lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); 48479ba33339SRoi Dayan 48482198b932SRoi Dayan mapping_id = mlx5_query_nic_system_image_guid(dev); 48492198b932SRoi Dayan 48502198b932SRoi Dayan chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, 48512198b932SRoi Dayan sizeof(struct mlx5_mapped_obj), 4852c9355682SChris Mi MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); 48532198b932SRoi Dayan 4854c9355682SChris Mi if (IS_ERR(chains_mapping)) { 4855c9355682SChris Mi err = PTR_ERR(chains_mapping); 4856c9355682SChris Mi goto err_mapping; 4857c9355682SChris Mi } 4858c9355682SChris Mi tc->mapping = chains_mapping; 4859c9355682SChris Mi 486066cb64e2SMaor Dickman err = mlx5e_tc_nic_create_miss_table(priv); 486166cb64e2SMaor Dickman if (err) 486266cb64e2SMaor Dickman goto err_chains; 486366cb64e2SMaor Dickman 4864c9355682SChris Mi if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) 4865c7569097SAriel Levkovich attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | 4866c7569097SAriel Levkovich MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; 48676a064674SAriel 
Levkovich attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; 48686a064674SAriel Levkovich attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev); 48696a064674SAriel Levkovich attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; 487066cb64e2SMaor Dickman attr.default_ft = priv->fs.tc.miss_t; 4871c9355682SChris Mi attr.mapping = chains_mapping; 48726a064674SAriel Levkovich 48736a064674SAriel Levkovich tc->chains = mlx5_chains_create(dev, &attr); 48746a064674SAriel Levkovich if (IS_ERR(tc->chains)) { 48756a064674SAriel Levkovich err = PTR_ERR(tc->chains); 487666cb64e2SMaor Dickman goto err_miss; 48776a064674SAriel Levkovich } 48786a064674SAriel Levkovich 4879f0da4daaSChris Mi tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); 4880aedd133dSAriel Levkovich tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, 4881f0da4daaSChris Mi MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); 4882aedd133dSAriel Levkovich 48834d8fcf21SAlaa Hleihel tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; 4884d48834f9SJiri Pirko err = register_netdevice_notifier_dev_net(priv->netdev, 4885d48834f9SJiri Pirko &tc->netdevice_nb, 4886d48834f9SJiri Pirko &tc->netdevice_nn); 4887d48834f9SJiri Pirko if (err) { 48884d8fcf21SAlaa Hleihel tc->netdevice_nb.notifier_call = NULL; 48894d8fcf21SAlaa Hleihel mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n"); 48906a064674SAriel Levkovich goto err_reg; 48914d8fcf21SAlaa Hleihel } 48924d8fcf21SAlaa Hleihel 48936a064674SAriel Levkovich return 0; 48946a064674SAriel Levkovich 48956a064674SAriel Levkovich err_reg: 4896aedd133dSAriel Levkovich mlx5_tc_ct_clean(tc->ct); 4897f0da4daaSChris Mi mlx5e_tc_post_act_destroy(tc->post_act); 48986a064674SAriel Levkovich mlx5_chains_destroy(tc->chains); 489966cb64e2SMaor Dickman err_miss: 490066cb64e2SMaor Dickman mlx5e_tc_nic_destroy_miss_table(priv); 49016a064674SAriel Levkovich err_chains: 4902c9355682SChris Mi mapping_destroy(chains_mapping); 4903c9355682SChris Mi err_mapping: 
49046a064674SAriel Levkovich rhashtable_destroy(&tc->ht); 49054d8fcf21SAlaa Hleihel return err; 4906e8f887acSAmir Vadai } 4907e8f887acSAmir Vadai 4908e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg) 4909e8f887acSAmir Vadai { 4910e8f887acSAmir Vadai struct mlx5e_tc_flow *flow = ptr; 4911655dc3d2SOr Gerlitz struct mlx5e_priv *priv = flow->priv; 4912e8f887acSAmir Vadai 4913961e8979SRoi Dayan mlx5e_tc_del_flow(priv, flow); 4914e8f887acSAmir Vadai kfree(flow); 4915e8f887acSAmir Vadai } 4916e8f887acSAmir Vadai 4917655dc3d2SOr Gerlitz void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) 4918e8f887acSAmir Vadai { 4919acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 4920e8f887acSAmir Vadai 49214d8fcf21SAlaa Hleihel if (tc->netdevice_nb.notifier_call) 4922d48834f9SJiri Pirko unregister_netdevice_notifier_dev_net(priv->netdev, 4923d48834f9SJiri Pirko &tc->netdevice_nb, 4924d48834f9SJiri Pirko &tc->netdevice_nn); 49254d8fcf21SAlaa Hleihel 4926b2fdf3d0SPaul Blakey mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr); 4927b32accdaSVlad Buslov mutex_destroy(&tc->hairpin_tbl_lock); 4928b32accdaSVlad Buslov 49296a064674SAriel Levkovich rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); 4930e8f887acSAmir Vadai 4931acff797cSMaor Gottlieb if (!IS_ERR_OR_NULL(tc->t)) { 49326a064674SAriel Levkovich mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL); 4933acff797cSMaor Gottlieb tc->t = NULL; 4934e8f887acSAmir Vadai } 4935b6fac0b4SVlad Buslov mutex_destroy(&tc->t_lock); 49366a064674SAriel Levkovich 4937aedd133dSAriel Levkovich mlx5_tc_ct_clean(tc->ct); 4938f0da4daaSChris Mi mlx5e_tc_post_act_destroy(tc->post_act); 4939c9355682SChris Mi mapping_destroy(tc->mapping); 49406a064674SAriel Levkovich mlx5_chains_destroy(tc->chains); 494166cb64e2SMaor Dickman mlx5e_tc_nic_destroy_miss_table(priv); 4942e8f887acSAmir Vadai } 4943655dc3d2SOr Gerlitz 4944d1a3138fSPaul Blakey int mlx5e_tc_ht_init(struct rhashtable *tc_ht) 4945d1a3138fSPaul Blakey { 
4946d1a3138fSPaul Blakey int err; 4947d1a3138fSPaul Blakey 4948d1a3138fSPaul Blakey err = rhashtable_init(tc_ht, &tc_ht_params); 4949d1a3138fSPaul Blakey if (err) 4950d1a3138fSPaul Blakey return err; 4951d1a3138fSPaul Blakey 4952d1a3138fSPaul Blakey lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); 4953d1a3138fSPaul Blakey 4954d1a3138fSPaul Blakey return 0; 4955d1a3138fSPaul Blakey } 4956d1a3138fSPaul Blakey 4957d1a3138fSPaul Blakey void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) 4958d1a3138fSPaul Blakey { 4959d1a3138fSPaul Blakey rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); 4960d1a3138fSPaul Blakey } 4961d1a3138fSPaul Blakey 4962d1a3138fSPaul Blakey int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) 4963655dc3d2SOr Gerlitz { 4964d7a42ad0SRoi Dayan const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); 4965aedd133dSAriel Levkovich struct mlx5e_rep_priv *rpriv; 49660a7fcb78SPaul Blakey struct mapping_ctx *mapping; 4967aedd133dSAriel Levkovich struct mlx5_eswitch *esw; 4968aedd133dSAriel Levkovich struct mlx5e_priv *priv; 49692198b932SRoi Dayan u64 mapping_id; 4970aedd133dSAriel Levkovich int err = 0; 49710a7fcb78SPaul Blakey 4972aedd133dSAriel Levkovich rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); 4973aedd133dSAriel Levkovich priv = netdev_priv(rpriv->netdev); 4974aedd133dSAriel Levkovich esw = priv->mdev->priv.eswitch; 49750a7fcb78SPaul Blakey 4976f0da4daaSChris Mi uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw), 4977f0da4daaSChris Mi MLX5_FLOW_NAMESPACE_FDB); 4978aedd133dSAriel Levkovich uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev), 4979aedd133dSAriel Levkovich esw_chains(esw), 4980aedd133dSAriel Levkovich &esw->offloads.mod_hdr, 4981f0da4daaSChris Mi MLX5_FLOW_NAMESPACE_FDB, 4982f0da4daaSChris Mi uplink_priv->post_act); 49834c3844d9SPaul Blakey 49844f4edcc2SAriel Levkovich uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev)); 
49854f4edcc2SAriel Levkovich 49862741f223SChris Mi uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); 49872a9ab10aSChris Mi 49882198b932SRoi Dayan mapping_id = mlx5_query_nic_system_image_guid(esw->dev); 49892198b932SRoi Dayan 49902198b932SRoi Dayan mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, 49912198b932SRoi Dayan sizeof(struct tunnel_match_key), 49920a7fcb78SPaul Blakey TUNNEL_INFO_BITS_MASK, true); 49932198b932SRoi Dayan 49940a7fcb78SPaul Blakey if (IS_ERR(mapping)) { 49950a7fcb78SPaul Blakey err = PTR_ERR(mapping); 49960a7fcb78SPaul Blakey goto err_tun_mapping; 49970a7fcb78SPaul Blakey } 49980a7fcb78SPaul Blakey uplink_priv->tunnel_mapping = mapping; 49990a7fcb78SPaul Blakey 50003222efd4SVlad Buslov /* Two last values are reserved for stack devices slow path table mark 50013222efd4SVlad Buslov * and bridge ingress push mark. 50023222efd4SVlad Buslov */ 50032198b932SRoi Dayan mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, 50043222efd4SVlad Buslov sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); 50050a7fcb78SPaul Blakey if (IS_ERR(mapping)) { 50060a7fcb78SPaul Blakey err = PTR_ERR(mapping); 50070a7fcb78SPaul Blakey goto err_enc_opts_mapping; 50080a7fcb78SPaul Blakey } 50090a7fcb78SPaul Blakey uplink_priv->tunnel_enc_opts_mapping = mapping; 50100a7fcb78SPaul Blakey 50118914add2SVlad Buslov uplink_priv->encap = mlx5e_tc_tun_init(priv); 50122b6c3c1eSWei Yongjun if (IS_ERR(uplink_priv->encap)) { 50132b6c3c1eSWei Yongjun err = PTR_ERR(uplink_priv->encap); 50148914add2SVlad Buslov goto err_register_fib_notifier; 50152b6c3c1eSWei Yongjun } 50168914add2SVlad Buslov 50172b6c3c1eSWei Yongjun return 0; 50180a7fcb78SPaul Blakey 50198914add2SVlad Buslov err_register_fib_notifier: 50200a7fcb78SPaul Blakey mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 50210a7fcb78SPaul Blakey err_enc_opts_mapping: 50220a7fcb78SPaul Blakey mapping_destroy(uplink_priv->tunnel_mapping); 50230a7fcb78SPaul Blakey 
err_tun_mapping: 50240027d70cSChris Mi mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 50254f4edcc2SAriel Levkovich mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 5026aedd133dSAriel Levkovich mlx5_tc_ct_clean(uplink_priv->ct_priv); 50270a7fcb78SPaul Blakey netdev_warn(priv->netdev, 50280a7fcb78SPaul Blakey "Failed to initialize tc (eswitch), err: %d", err); 5029f0da4daaSChris Mi mlx5e_tc_post_act_destroy(uplink_priv->post_act); 50300a7fcb78SPaul Blakey return err; 5031655dc3d2SOr Gerlitz } 5032655dc3d2SOr Gerlitz 5033d1a3138fSPaul Blakey void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) 5034655dc3d2SOr Gerlitz { 50358914add2SVlad Buslov mlx5e_tc_tun_cleanup(uplink_priv->encap); 50368914add2SVlad Buslov 50370a7fcb78SPaul Blakey mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 50380a7fcb78SPaul Blakey mapping_destroy(uplink_priv->tunnel_mapping); 50394c3844d9SPaul Blakey 50400027d70cSChris Mi mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 50414f4edcc2SAriel Levkovich mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 5042aedd133dSAriel Levkovich mlx5_tc_ct_clean(uplink_priv->ct_priv); 504374e6b2a8SJianbo Liu mlx5e_flow_meters_cleanup(uplink_priv->flow_meters); 5044f0da4daaSChris Mi mlx5e_tc_post_act_destroy(uplink_priv->post_act); 5045655dc3d2SOr Gerlitz } 504601252a27SOr Gerlitz 5047226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) 504801252a27SOr Gerlitz { 5049d9ee0491SOr Gerlitz struct rhashtable *tc_ht = get_tc_ht(priv, flags); 505001252a27SOr Gerlitz 505101252a27SOr Gerlitz return atomic_read(&tc_ht->nelems); 505201252a27SOr Gerlitz } 505304de7ddaSRoi Dayan 505404de7ddaSRoi Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) 505504de7ddaSRoi Dayan { 505604de7ddaSRoi Dayan struct mlx5e_tc_flow *flow, *tmp; 505704de7ddaSRoi Dayan 505804de7ddaSRoi Dayan list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer) 505904de7ddaSRoi Dayan 
__mlx5e_tc_del_fdb_peer_flow(flow); 506004de7ddaSRoi Dayan } 5061b4a23329SRoi Dayan 5062b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work) 5063b4a23329SRoi Dayan { 5064b4a23329SRoi Dayan struct mlx5_rep_uplink_priv *rpriv = 5065b4a23329SRoi Dayan container_of(work, struct mlx5_rep_uplink_priv, 5066b4a23329SRoi Dayan reoffload_flows_work); 5067b4a23329SRoi Dayan struct mlx5e_tc_flow *flow, *tmp; 5068b4a23329SRoi Dayan 5069ad86755bSVlad Buslov mutex_lock(&rpriv->unready_flows_lock); 5070b4a23329SRoi Dayan list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { 5071b4a23329SRoi Dayan if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) 5072ad86755bSVlad Buslov unready_flow_del(flow); 5073b4a23329SRoi Dayan } 5074ad86755bSVlad Buslov mutex_unlock(&rpriv->unready_flows_lock); 5075b4a23329SRoi Dayan } 5076e2394a61SVlad Buslov 5077e2394a61SVlad Buslov static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, 5078e2394a61SVlad Buslov struct flow_cls_offload *cls_flower, 5079e2394a61SVlad Buslov unsigned long flags) 5080e2394a61SVlad Buslov { 5081e2394a61SVlad Buslov switch (cls_flower->command) { 5082e2394a61SVlad Buslov case FLOW_CLS_REPLACE: 5083e2394a61SVlad Buslov return mlx5e_configure_flower(priv->netdev, priv, cls_flower, 5084e2394a61SVlad Buslov flags); 5085e2394a61SVlad Buslov case FLOW_CLS_DESTROY: 5086e2394a61SVlad Buslov return mlx5e_delete_flower(priv->netdev, priv, cls_flower, 5087e2394a61SVlad Buslov flags); 5088e2394a61SVlad Buslov case FLOW_CLS_STATS: 5089e2394a61SVlad Buslov return mlx5e_stats_flower(priv->netdev, priv, cls_flower, 5090e2394a61SVlad Buslov flags); 5091e2394a61SVlad Buslov default: 5092e2394a61SVlad Buslov return -EOPNOTSUPP; 5093e2394a61SVlad Buslov } 5094e2394a61SVlad Buslov } 5095e2394a61SVlad Buslov 5096e2394a61SVlad Buslov int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 5097e2394a61SVlad Buslov void *cb_priv) 5098e2394a61SVlad Buslov { 5099ec9457a6SRoi Dayan unsigned 
long flags = MLX5_TC_FLAG(INGRESS); 5100e2394a61SVlad Buslov struct mlx5e_priv *priv = cb_priv; 5101e2394a61SVlad Buslov 51022ff349c5SRoi Dayan if (!priv->netdev || !netif_device_present(priv->netdev)) 51032ff349c5SRoi Dayan return -EOPNOTSUPP; 51042ff349c5SRoi Dayan 5105ec9457a6SRoi Dayan if (mlx5e_is_uplink_rep(priv)) 5106ec9457a6SRoi Dayan flags |= MLX5_TC_FLAG(ESW_OFFLOAD); 5107ec9457a6SRoi Dayan else 5108ec9457a6SRoi Dayan flags |= MLX5_TC_FLAG(NIC_OFFLOAD); 5109ec9457a6SRoi Dayan 5110e2394a61SVlad Buslov switch (type) { 5111e2394a61SVlad Buslov case TC_SETUP_CLSFLOWER: 5112e2394a61SVlad Buslov return mlx5e_setup_tc_cls_flower(priv, type_data, flags); 5113e2394a61SVlad Buslov default: 5114e2394a61SVlad Buslov return -EOPNOTSUPP; 5115e2394a61SVlad Buslov } 5116e2394a61SVlad Buslov } 5117c7569097SAriel Levkovich 5118c7569097SAriel Levkovich bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, 5119c7569097SAriel Levkovich struct sk_buff *skb) 5120c7569097SAriel Levkovich { 5121c7569097SAriel Levkovich #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 5122aedd133dSAriel Levkovich u32 chain = 0, chain_tag, reg_b, zone_restore_id; 5123c7569097SAriel Levkovich struct mlx5e_priv *priv = netdev_priv(skb->dev); 5124aedd133dSAriel Levkovich struct mlx5e_tc_table *tc = &priv->fs.tc; 5125a91d98a0SChris Mi struct mlx5_mapped_obj mapped_obj; 5126c7569097SAriel Levkovich struct tc_skb_ext *tc_skb_ext; 5127c7569097SAriel Levkovich int err; 5128c7569097SAriel Levkovich 5129c7569097SAriel Levkovich reg_b = be32_to_cpu(cqe->ft_metadata); 5130c7569097SAriel Levkovich 5131c7569097SAriel Levkovich chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; 5132c7569097SAriel Levkovich 5133c9355682SChris Mi err = mapping_find(tc->mapping, chain_tag, &mapped_obj); 5134c7569097SAriel Levkovich if (err) { 5135c7569097SAriel Levkovich netdev_dbg(priv->netdev, 5136c7569097SAriel Levkovich "Couldn't find chain for chain tag: %d, err: %d\n", 5137c7569097SAriel Levkovich chain_tag, err); 5138c7569097SAriel Levkovich 
return false; 5139c7569097SAriel Levkovich } 5140c7569097SAriel Levkovich 5141a91d98a0SChris Mi if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { 5142a91d98a0SChris Mi chain = mapped_obj.chain; 51439453d45eSVlad Buslov tc_skb_ext = tc_skb_ext_alloc(skb); 5144c7569097SAriel Levkovich if (WARN_ON(!tc_skb_ext)) 5145c7569097SAriel Levkovich return false; 5146c7569097SAriel Levkovich 5147c7569097SAriel Levkovich tc_skb_ext->chain = chain; 5148aedd133dSAriel Levkovich 514917c5da03SJianbo Liu zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & 515048d216e5SVlad Buslov ESW_ZONE_ID_MASK; 5151aedd133dSAriel Levkovich 5152aedd133dSAriel Levkovich if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, 5153aedd133dSAriel Levkovich zone_restore_id)) 5154aedd133dSAriel Levkovich return false; 5155a91d98a0SChris Mi } else { 5156a91d98a0SChris Mi netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); 5157a91d98a0SChris Mi return false; 5158c7569097SAriel Levkovich } 5159c7569097SAriel Levkovich #endif /* CONFIG_NET_TC_SKB_EXT */ 5160c7569097SAriel Levkovich 5161c7569097SAriel Levkovich return true; 5162c7569097SAriel Levkovich } 5163