// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "lgcy.h"

/* Delete the active legacy ingress ACL rules on @vport: the drop rule
 * (owned here) and the allow rule (via the shared helper).  The flow
 * groups and the ACL table itself are left in place so that
 * esw_acl_ingress_lgcy_setup() can re-create rules without rebuilding
 * the table.
 */
static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.legacy.drop_rule) {
		mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
		vport->ingress.legacy.drop_rule = NULL;
	}
	esw_acl_ingress_allow_rule_destroy(vport);
}

/* Create the four flow groups of @vport's legacy ingress ACL table,
 * one flow index per group:
 *   index 0: cvlan_tag + smac  (untagged traffic with original MAC)
 *   index 1: cvlan_tag only    (untagged traffic)
 *   index 2: smac only         (traffic with original MAC)
 *   index 3: no criteria       (catch-all, used by the drop rule)
 * On any failure the groups created so far are destroyed and their
 * vport pointers cleared (reverse-order goto unwind).
 *
 * Return: 0 on success or a negative errno.
 */
static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* match_criteria points inside flow_group_in, so it stays valid
	 * across the memset() calls that reset the buffer between groups.
	 */
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	/* Group 0: match VLAN tag presence + source MAC. */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	/* Group 1: match VLAN tag presence only. */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	/* Group 2: match source MAC only. */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	/* Group 3: no match criteria set — catch-all group for the drop
	 * rule that handles everything the allow groups did not.
	 */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

	/* Unwind in reverse creation order; each label tears down the
	 * groups created before the failing step.
	 */
drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}

/* Destroy all legacy ingress ACL flow groups on @vport and clear their
 * pointers.  Safe to call when some or all groups are already gone.
 */
static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.legacy.allow_spoofchk_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
	if (vport->ingress.legacy.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
		vport->ingress.legacy.drop_grp = NULL;
	}
}

/* (Re)configure the legacy-mode ingress ACL of @vport from
 * vport->info (vlan, qos, spoofchk, mac).  Existing rules are always
 * deleted first; if none of vlan/qos/spoofchk is set, the whole ACL
 * (rules, groups, table, drop counter) is torn down and 0 is returned.
 * The ACL table and its groups are created lazily on first use.
 *
 * Return: 0 on success or a negative errno; on failure the ACL is
 * cleaned up via esw_acl_ingress_lgcy_cleanup().
 */
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	struct mlx5_flow_destination drop_ctr_dst = {};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec = NULL;
	struct mlx5_fc *counter = NULL;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 * 1 allow rule from one of the first 3 groups.
	 * 1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	esw_acl_ingress_lgcy_rules_destroy(vport);

	/* Drop-rule counter is best-effort: on failure warn, continue
	 * without a counter, and park the (possibly NULL) pointer in the
	 * vport so cleanup can reclaim it on every exit path.
	 */
	if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
		counter = mlx5_fc_create(esw->dev, false);
		if (IS_ERR(counter)) {
			esw_warn(esw->dev,
				 "vport[%d] configure ingress drop rule counter failed\n",
				 vport->vport);
			counter = NULL;
		}
		vport->ingress.legacy.drop_counter = counter;
	}

	/* Nothing to enforce — remove the ACL entirely. */
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_acl_ingress_lgcy_cleanup(esw, vport);
		return 0;
	}

	/* Lazily create the ACL table and its 4 groups on first setup. */
	if (!vport->ingress.acl) {
		vport->ingress.acl = esw_acl_table_create(esw, vport,
							  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
							  table_size);
		if (IS_ERR(vport->ingress.acl)) {
			err = PTR_ERR(vport->ingress.acl);
			vport->ingress.acl = NULL;
			return err;
		}

		err = esw_acl_ingress_lgcy_groups_create(esw, vport);
		if (err)
			goto out;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* VLAN/QoS enforcement: allow rule matches on cvlan_tag. */
	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);

	/* Spoof check: allow rule additionally matches the vport's
	 * configured source MAC.
	 */
	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	/* Create ingress allow rule */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	/* Catch-all drop rule (NULL spec — matches everything the allow
	 * rule did not).
	 */
	memset(&flow_act, 0, sizeof(flow_act));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, NULL,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.legacy.drop_rule)) {
		err = PTR_ERR(vport->ingress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.legacy.drop_rule = NULL;
		goto out;
	}
	kvfree(spec);
	return 0;

out:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
	kvfree(spec);
	return err;
}

/* Tear down @vport's legacy ingress ACL: rules, groups and table (if
 * the table exists), then the drop counter (always).  Idempotent — all
 * destroyed objects have their pointers cleared.
 */
void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		goto clean_drop_counter;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_acl_ingress_lgcy_rules_destroy(vport);
	esw_acl_ingress_lgcy_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);

clean_drop_counter:
	if (vport->ingress.legacy.drop_counter) {
		mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
		vport->ingress.legacy.drop_counter = NULL;
	}
}