1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* 3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. 4 */ 5 6 #include <rdma/ib_user_verbs.h> 7 #include <rdma/ib_verbs.h> 8 #include <rdma/uverbs_types.h> 9 #include <rdma/uverbs_ioctl.h> 10 #include <rdma/uverbs_std_types.h> 11 #include <rdma/mlx5_user_ioctl_cmds.h> 12 #include <rdma/mlx5_user_ioctl_verbs.h> 13 #include <rdma/ib_umem.h> 14 #include <linux/mlx5/driver.h> 15 #include <linux/mlx5/fs.h> 16 #include <linux/mlx5/fs_helpers.h> 17 #include <linux/mlx5/accel.h> 18 #include <linux/mlx5/eswitch.h> 19 #include "mlx5_ib.h" 20 #include "counters.h" 21 #include "devx.h" 22 #include "fs.h" 23 24 #define UVERBS_MODULE_NAME mlx5_ib 25 #include <rdma/uverbs_named_ioctl.h> 26 27 enum { 28 MATCH_CRITERIA_ENABLE_OUTER_BIT, 29 MATCH_CRITERIA_ENABLE_MISC_BIT, 30 MATCH_CRITERIA_ENABLE_INNER_BIT, 31 MATCH_CRITERIA_ENABLE_MISC2_BIT 32 }; 33 34 #define HEADER_IS_ZERO(match_criteria, headers) \ 35 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 36 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 37 38 static u8 get_match_criteria_enable(u32 *match_criteria) 39 { 40 u8 match_criteria_enable; 41 42 match_criteria_enable = 43 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 44 MATCH_CRITERIA_ENABLE_OUTER_BIT; 45 match_criteria_enable |= 46 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 47 MATCH_CRITERIA_ENABLE_MISC_BIT; 48 match_criteria_enable |= 49 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 50 MATCH_CRITERIA_ENABLE_INNER_BIT; 51 match_criteria_enable |= 52 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 53 MATCH_CRITERIA_ENABLE_MISC2_BIT; 54 55 return match_criteria_enable; 56 } 57 58 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 59 { 60 u8 entry_mask; 61 u8 entry_val; 62 int err = 0; 63 64 if (!mask) 65 goto out; 66 67 entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, 68 ip_protocol); 69 entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, 70 ip_protocol); 71 if (!entry_mask) { 72 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 73 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 74 goto out; 75 } 76 /* Don't override existing ip protocol */ 77 if (mask != entry_mask || val != entry_val) 78 err = -EINVAL; 79 out: 80 return err; 81 } 82 83 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 84 bool inner) 85 { 86 if (inner) { 87 MLX5_SET(fte_match_set_misc, 88 misc_c, inner_ipv6_flow_label, mask); 89 MLX5_SET(fte_match_set_misc, 90 misc_v, inner_ipv6_flow_label, val); 91 } else { 92 MLX5_SET(fte_match_set_misc, 93 misc_c, outer_ipv6_flow_label, mask); 94 MLX5_SET(fte_match_set_misc, 95 misc_v, outer_ipv6_flow_label, val); 96 } 97 } 98 99 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 100 { 101 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 102 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 103 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 104 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 105 } 106 107 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) 108 { 109 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) && 110 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL)) 111 return -EOPNOTSUPP; 112 113 if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) && 114 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP)) 115 return -EOPNOTSUPP; 116 117 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) && 118 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS)) 119 return -EOPNOTSUPP; 120 121 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) && 122 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL)) 123 return -EOPNOTSUPP; 124 125 return 0; 126 } 127 128 #define LAST_ETH_FIELD vlan_tag 129 #define LAST_IB_FIELD sl 130 #define LAST_IPV4_FIELD tos 131 #define LAST_IPV6_FIELD traffic_class 132 #define LAST_TCP_UDP_FIELD src_port 133 #define LAST_TUNNEL_FIELD tunnel_id 134 #define LAST_FLOW_TAG_FIELD tag_id 135 #define LAST_DROP_FIELD size 136 #define LAST_COUNTERS_FIELD counters 137 138 /* Field is the last supported field */ 139 #define FIELDS_NOT_SUPPORTED(filter, field) \ 140 memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \ 141 sizeof(filter) - offsetofend(typeof(filter), field)) 142 143 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, 144 bool is_egress, 145 struct mlx5_flow_act *action) 146 { 147 148 switch (maction->ib_action.type) { 149 case IB_FLOW_ACTION_ESP: 150 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 151 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) 152 return -EINVAL; 153 /* Currently only AES_GCM keymat is supported by the driver */ 154 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 155 action->action |= is_egress ? 156 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 157 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 158 return 0; 159 case IB_FLOW_ACTION_UNSPECIFIED: 160 if (maction->flow_action_raw.sub_type == 161 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { 162 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 163 return -EINVAL; 164 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 165 action->modify_hdr = 166 maction->flow_action_raw.modify_hdr; 167 return 0; 168 } 169 if (maction->flow_action_raw.sub_type == 170 MLX5_IB_FLOW_ACTION_DECAP) { 171 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 172 return -EINVAL; 173 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 174 return 0; 175 } 176 if (maction->flow_action_raw.sub_type == 177 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) { 178 if (action->action & 179 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) 180 return -EINVAL; 181 action->action |= 182 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 183 action->pkt_reformat = 184 maction->flow_action_raw.pkt_reformat; 185 return 0; 186 } 187 fallthrough; 188 default: 189 return -EOPNOTSUPP; 190 } 191 } 192 193 static int parse_flow_attr(struct mlx5_core_dev *mdev, 194 struct mlx5_flow_spec *spec, 195 const union ib_flow_spec *ib_spec, 196 const struct ib_flow_attr *flow_attr, 197 struct mlx5_flow_act *action, u32 prev_type) 198 { 199 struct mlx5_flow_context *flow_context = &spec->flow_context; 200 u32 *match_c = spec->match_criteria; 201 u32 *match_v = spec->match_value; 202 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 203 misc_parameters); 204 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 205 misc_parameters); 206 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 207 misc_parameters_2); 208 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 209 misc_parameters_2); 210 void *headers_c; 211 void *headers_v; 212 int match_ipv; 213 int ret; 214 215 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 216 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 217 inner_headers); 218 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 219 inner_headers); 220 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 221 ft_field_support.inner_ip_version); 222 } else { 223 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 224 outer_headers); 225 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 226 outer_headers); 227 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 228 ft_field_support.outer_ip_version); 229 } 230 231 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 232 case IB_FLOW_SPEC_ETH: 233 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 234 return -EOPNOTSUPP; 235 236 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 237 dmac_47_16), 238 ib_spec->eth.mask.dst_mac); 239 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 240 dmac_47_16), 241 ib_spec->eth.val.dst_mac); 242 243 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 244 smac_47_16), 245 ib_spec->eth.mask.src_mac); 246 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 247 smac_47_16), 248 ib_spec->eth.val.src_mac); 249 250 if (ib_spec->eth.mask.vlan_tag) { 251 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 252 cvlan_tag, 1); 253 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 254 cvlan_tag, 1); 255 256 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 257 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 258 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 259 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 260 261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 262 first_cfi, 263 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 264 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 265 first_cfi, 266 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 267 268 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 269 first_prio, 270 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 271 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 272 first_prio, 273 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 274 } 275 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 276 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 277 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 278 ethertype, ntohs(ib_spec->eth.val.ether_type)); 279 break; 280 case IB_FLOW_SPEC_IPV4: 281 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 282 return -EOPNOTSUPP; 283 284 if (match_ipv) { 285 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 286 ip_version, 0xf); 287 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 288 ip_version, MLX5_FS_IPV4_VERSION); 289 } else { 290 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 291 ethertype, 0xffff); 292 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 293 ethertype, ETH_P_IP); 294 } 295 296 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 297 src_ipv4_src_ipv6.ipv4_layout.ipv4), 298 &ib_spec->ipv4.mask.src_ip, 299 sizeof(ib_spec->ipv4.mask.src_ip)); 300 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 301 src_ipv4_src_ipv6.ipv4_layout.ipv4), 302 &ib_spec->ipv4.val.src_ip, 303 sizeof(ib_spec->ipv4.val.src_ip)); 304 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 305 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 306 &ib_spec->ipv4.mask.dst_ip, 307 sizeof(ib_spec->ipv4.mask.dst_ip)); 308 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 309 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 310 &ib_spec->ipv4.val.dst_ip, 311 sizeof(ib_spec->ipv4.val.dst_ip)); 312 313 set_tos(headers_c, headers_v, 314 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 315 316 if (set_proto(headers_c, headers_v, 317 ib_spec->ipv4.mask.proto, 318 ib_spec->ipv4.val.proto)) 319 return -EINVAL; 320 break; 321 case IB_FLOW_SPEC_IPV6: 322 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 323 return -EOPNOTSUPP; 324 325 if (match_ipv) { 326 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 327 ip_version, 0xf); 328 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 329 ip_version, MLX5_FS_IPV6_VERSION); 330 } else { 331 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 332 ethertype, 0xffff); 333 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 334 ethertype, ETH_P_IPV6); 335 } 336 337 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 338 src_ipv4_src_ipv6.ipv6_layout.ipv6), 339 &ib_spec->ipv6.mask.src_ip, 340 sizeof(ib_spec->ipv6.mask.src_ip)); 341 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 342 src_ipv4_src_ipv6.ipv6_layout.ipv6), 343 &ib_spec->ipv6.val.src_ip, 344 sizeof(ib_spec->ipv6.val.src_ip)); 345 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 346 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 347 &ib_spec->ipv6.mask.dst_ip, 348 sizeof(ib_spec->ipv6.mask.dst_ip)); 349 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 350 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 351 &ib_spec->ipv6.val.dst_ip, 352 sizeof(ib_spec->ipv6.val.dst_ip)); 353 354 set_tos(headers_c, headers_v, 355 ib_spec->ipv6.mask.traffic_class, 356 ib_spec->ipv6.val.traffic_class); 357 358 if (set_proto(headers_c, headers_v, 359 ib_spec->ipv6.mask.next_hdr, 360 ib_spec->ipv6.val.next_hdr)) 361 return -EINVAL; 362 363 set_flow_label(misc_params_c, misc_params_v, 364 ntohl(ib_spec->ipv6.mask.flow_label), 365 ntohl(ib_spec->ipv6.val.flow_label), 366 ib_spec->type & IB_FLOW_SPEC_INNER); 367 break; 368 case IB_FLOW_SPEC_ESP: 369 if (ib_spec->esp.mask.seq) 370 return -EOPNOTSUPP; 371 372 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 373 ntohl(ib_spec->esp.mask.spi)); 374 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 375 ntohl(ib_spec->esp.val.spi)); 376 break; 377 case IB_FLOW_SPEC_TCP: 378 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 379 LAST_TCP_UDP_FIELD)) 380 return -EOPNOTSUPP; 381 382 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) 383 return -EINVAL; 384 385 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 386 ntohs(ib_spec->tcp_udp.mask.src_port)); 387 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 388 ntohs(ib_spec->tcp_udp.val.src_port)); 389 390 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 391 ntohs(ib_spec->tcp_udp.mask.dst_port)); 392 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 393 ntohs(ib_spec->tcp_udp.val.dst_port)); 394 break; 395 case IB_FLOW_SPEC_UDP: 396 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 397 LAST_TCP_UDP_FIELD)) 398 return -EOPNOTSUPP; 399 400 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) 401 return -EINVAL; 402 403 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 404 ntohs(ib_spec->tcp_udp.mask.src_port)); 405 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 406 ntohs(ib_spec->tcp_udp.val.src_port)); 407 408 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 409 ntohs(ib_spec->tcp_udp.mask.dst_port)); 410 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 411 ntohs(ib_spec->tcp_udp.val.dst_port)); 412 break; 413 case IB_FLOW_SPEC_GRE: 414 if (ib_spec->gre.mask.c_ks_res0_ver) 415 return -EOPNOTSUPP; 416 417 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) 418 return -EINVAL; 419 420 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 421 0xff); 422 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 423 IPPROTO_GRE); 424 425 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 426 ntohs(ib_spec->gre.mask.protocol)); 427 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 428 ntohs(ib_spec->gre.val.protocol)); 429 430 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 431 gre_key.nvgre.hi), 432 &ib_spec->gre.mask.key, 433 sizeof(ib_spec->gre.mask.key)); 434 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 435 gre_key.nvgre.hi), 436 &ib_spec->gre.val.key, 437 sizeof(ib_spec->gre.val.key)); 438 break; 439 case IB_FLOW_SPEC_MPLS: 440 switch (prev_type) { 441 case IB_FLOW_SPEC_UDP: 442 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 443 ft_field_support.outer_first_mpls_over_udp), 444 &ib_spec->mpls.mask.tag)) 445 return -EOPNOTSUPP; 446 447 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 448 outer_first_mpls_over_udp), 449 &ib_spec->mpls.val.tag, 450 sizeof(ib_spec->mpls.val.tag)); 451 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 452 outer_first_mpls_over_udp), 453 &ib_spec->mpls.mask.tag, 454 sizeof(ib_spec->mpls.mask.tag)); 455 break; 456 case IB_FLOW_SPEC_GRE: 457 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 458 ft_field_support.outer_first_mpls_over_gre), 459 &ib_spec->mpls.mask.tag)) 460 return -EOPNOTSUPP; 461 462 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 463 outer_first_mpls_over_gre), 464 &ib_spec->mpls.val.tag, 465 sizeof(ib_spec->mpls.val.tag)); 466 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 467 outer_first_mpls_over_gre), 468 &ib_spec->mpls.mask.tag, 469 sizeof(ib_spec->mpls.mask.tag)); 470 break; 471 default: 472 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 473 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 474 ft_field_support.inner_first_mpls), 475 &ib_spec->mpls.mask.tag)) 476 return -EOPNOTSUPP; 477 478 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 479 inner_first_mpls), 480 &ib_spec->mpls.val.tag, 481 sizeof(ib_spec->mpls.val.tag)); 482 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 483 inner_first_mpls), 484 &ib_spec->mpls.mask.tag, 485 sizeof(ib_spec->mpls.mask.tag)); 486 } else { 487 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 488 ft_field_support.outer_first_mpls), 489 &ib_spec->mpls.mask.tag)) 490 return -EOPNOTSUPP; 491 492 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 493 outer_first_mpls), 494 &ib_spec->mpls.val.tag, 495 sizeof(ib_spec->mpls.val.tag)); 496 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 497 outer_first_mpls), 498 &ib_spec->mpls.mask.tag, 499 sizeof(ib_spec->mpls.mask.tag)); 500 } 501 } 502 break; 503 case IB_FLOW_SPEC_VXLAN_TUNNEL: 504 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 505 LAST_TUNNEL_FIELD)) 506 return -EOPNOTSUPP; 507 508 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 509 ntohl(ib_spec->tunnel.mask.tunnel_id)); 510 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 511 ntohl(ib_spec->tunnel.val.tunnel_id)); 512 break; 513 case IB_FLOW_SPEC_ACTION_TAG: 514 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 515 LAST_FLOW_TAG_FIELD)) 516 return -EOPNOTSUPP; 517 if (ib_spec->flow_tag.tag_id >= BIT(24)) 518 return -EINVAL; 519 520 flow_context->flow_tag = ib_spec->flow_tag.tag_id; 521 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 522 break; 523 case IB_FLOW_SPEC_ACTION_DROP: 524 if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 525 LAST_DROP_FIELD)) 526 return -EOPNOTSUPP; 527 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 528 break; 529 case IB_FLOW_SPEC_ACTION_HANDLE: 530 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act), 531 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action); 532 if (ret) 533 return ret; 534 break; 535 case IB_FLOW_SPEC_ACTION_COUNT: 536 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 537 LAST_COUNTERS_FIELD)) 538 return -EOPNOTSUPP; 539 540 /* for now support only one counters spec per flow */ 541 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 542 return -EINVAL; 543 544 action->counters = ib_spec->flow_count.counters; 545 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 546 break; 547 default: 548 return -EINVAL; 549 } 550 551 return 0; 552 } 553 554 /* If a flow could catch both multicast and unicast packets, 555 * it won't fall into the multicast flow steering table and this rule 556 * could steal other multicast packets. 557 */ 558 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) 559 { 560 union ib_flow_spec *flow_spec; 561 562 if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 563 ib_attr->num_of_specs < 1) 564 return false; 565 566 flow_spec = (union ib_flow_spec *)(ib_attr + 1); 567 if (flow_spec->type == IB_FLOW_SPEC_IPV4) { 568 struct ib_flow_spec_ipv4 *ipv4_spec; 569 570 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec; 571 if (ipv4_is_multicast(ipv4_spec->val.dst_ip)) 572 return true; 573 574 return false; 575 } 576 577 if (flow_spec->type == IB_FLOW_SPEC_ETH) { 578 struct ib_flow_spec_eth *eth_spec; 579 580 eth_spec = (struct ib_flow_spec_eth *)flow_spec; 581 return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 582 is_multicast_ether_addr(eth_spec->val.dst_mac); 583 } 584 585 return false; 586 } 587 588 enum valid_spec { 589 VALID_SPEC_INVALID, 590 VALID_SPEC_VALID, 591 VALID_SPEC_NA, 592 }; 593 594 static enum valid_spec 595 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, 596 const struct mlx5_flow_spec *spec, 597 const struct mlx5_flow_act *flow_act, 598 bool egress) 599 { 600 const u32 *match_c = spec->match_criteria; 601 bool is_crypto = 602 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 603 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); 604 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); 605 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; 606 607 /* 608 * Currently only crypto is supported in egress, when regular egress 609 * rules would be supported, always return VALID_SPEC_NA. 610 */ 611 if (!is_crypto) 612 return VALID_SPEC_NA; 613 614 return is_crypto && is_ipsec && 615 (!egress || (!is_drop && 616 !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? 617 VALID_SPEC_VALID : VALID_SPEC_INVALID; 618 } 619 620 static bool is_valid_spec(struct mlx5_core_dev *mdev, 621 const struct mlx5_flow_spec *spec, 622 const struct mlx5_flow_act *flow_act, 623 bool egress) 624 { 625 /* We curretly only support ipsec egress flow */ 626 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; 627 } 628 629 static bool is_valid_ethertype(struct mlx5_core_dev *mdev, 630 const struct ib_flow_attr *flow_attr, 631 bool check_inner) 632 { 633 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 634 int match_ipv = check_inner ? 635 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 636 ft_field_support.inner_ip_version) : 637 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 638 ft_field_support.outer_ip_version); 639 int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0; 640 bool ipv4_spec_valid, ipv6_spec_valid; 641 unsigned int ip_spec_type = 0; 642 bool has_ethertype = false; 643 unsigned int spec_index; 644 bool mask_valid = true; 645 u16 eth_type = 0; 646 bool type_valid; 647 648 /* Validate that ethertype is correct */ 649 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 650 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 651 ib_spec->eth.mask.ether_type) { 652 mask_valid = (ib_spec->eth.mask.ether_type == 653 htons(0xffff)); 654 has_ethertype = true; 655 eth_type = ntohs(ib_spec->eth.val.ether_type); 656 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 657 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 658 ip_spec_type = ib_spec->type; 659 } 660 ib_spec = (void *)ib_spec + ib_spec->size; 661 } 662 663 type_valid = (!has_ethertype) || (!ip_spec_type); 664 if (!type_valid && mask_valid) { 665 ipv4_spec_valid = (eth_type == ETH_P_IP) && 666 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 667 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 668 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 669 670 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 671 (((eth_type == ETH_P_MPLS_UC) || 672 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 673 } 674 675 return type_valid; 676 } 677 678 static bool is_valid_attr(struct mlx5_core_dev *mdev, 679 const struct ib_flow_attr *flow_attr) 680 { 681 return is_valid_ethertype(mdev, flow_attr, false) && 682 is_valid_ethertype(mdev, flow_attr, true); 683 } 684 685 static void put_flow_table(struct mlx5_ib_dev *dev, 686 struct mlx5_ib_flow_prio *prio, bool ft_added) 687 { 688 prio->refcount -= !!ft_added; 689 if (!prio->refcount) { 690 mlx5_destroy_flow_table(prio->flow_table); 691 prio->flow_table = NULL; 692 } 693 } 694 695 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 696 { 697 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 698 struct mlx5_ib_flow_handler, 699 ibflow); 700 struct mlx5_ib_flow_handler *iter, *tmp; 701 struct mlx5_ib_dev *dev = handler->dev; 702 703 mutex_lock(&dev->flow_db->lock); 704 705 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 706 mlx5_del_flow_rules(iter->rule); 707 put_flow_table(dev, iter->prio, true); 708 list_del(&iter->list); 709 kfree(iter); 710 } 711 712 mlx5_del_flow_rules(handler->rule); 713 put_flow_table(dev, handler->prio, true); 714 mlx5_ib_counters_clear_description(handler->ibcounters); 715 mutex_unlock(&dev->flow_db->lock); 716 if (handler->flow_matcher) 717 atomic_dec(&handler->flow_matcher->usecnt); 718 kfree(handler); 719 720 return 0; 721 } 722 723 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 724 { 725 priority *= 2; 726 if (!dont_trap) 727 priority++; 728 return priority; 729 } 730 731 enum flow_table_type { 732 MLX5_IB_FT_RX, 733 MLX5_IB_FT_TX 734 }; 735 736 #define MLX5_FS_MAX_TYPES 6 737 #define MLX5_FS_MAX_ENTRIES BIT(16) 738 739 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns, 740 struct mlx5_ib_flow_prio *prio, 741 int priority, 742 int num_entries, int num_groups, 743 u32 flags) 744 { 745 struct mlx5_flow_table_attr ft_attr = {}; 746 struct mlx5_flow_table *ft; 747 748 ft_attr.prio = priority; 749 ft_attr.max_fte = num_entries; 750 ft_attr.flags = flags; 751 ft_attr.autogroup.max_num_groups = num_groups; 752 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); 753 if (IS_ERR(ft)) 754 return ERR_CAST(ft); 755 756 prio->flow_table = ft; 757 prio->refcount = 0; 758 return prio; 759 } 760 761 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 762 struct ib_flow_attr *flow_attr, 763 enum flow_table_type ft_type) 764 { 765 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 766 struct mlx5_flow_namespace *ns = NULL; 767 enum mlx5_flow_namespace_type fn_type; 768 struct mlx5_ib_flow_prio *prio; 769 struct mlx5_flow_table *ft; 770 int max_table_size; 771 int num_entries; 772 int num_groups; 773 bool esw_encap; 774 u32 flags = 0; 775 int priority; 776 777 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 778 log_max_ft_size)); 779 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 780 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 781 switch (flow_attr->type) { 782 case IB_FLOW_ATTR_NORMAL: 783 if (flow_is_multicast_only(flow_attr) && !dont_trap) 784 priority = MLX5_IB_FLOW_MCAST_PRIO; 785 else 786 priority = ib_prio_to_core_prio(flow_attr->priority, 787 dont_trap); 788 if (ft_type == MLX5_IB_FT_RX) { 789 fn_type = MLX5_FLOW_NAMESPACE_BYPASS; 790 prio = &dev->flow_db->prios[priority]; 791 if (!dev->is_rep && !esw_encap && 792 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) 793 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 794 if (!dev->is_rep && !esw_encap && 795 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 796 reformat_l3_tunnel_to_l2)) 797 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 798 } else { 799 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX( 800 dev->mdev, log_max_ft_size)); 801 fn_type = MLX5_FLOW_NAMESPACE_EGRESS; 802 prio = &dev->flow_db->egress_prios[priority]; 803 if (!dev->is_rep && !esw_encap && 804 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) 805 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 806 } 807 ns = mlx5_get_flow_namespace(dev->mdev, fn_type); 808 num_entries = MLX5_FS_MAX_ENTRIES; 809 num_groups = MLX5_FS_MAX_TYPES; 810 break; 811 case IB_FLOW_ATTR_ALL_DEFAULT: 812 case IB_FLOW_ATTR_MC_DEFAULT: 813 ns = mlx5_get_flow_namespace(dev->mdev, 814 MLX5_FLOW_NAMESPACE_LEFTOVERS); 815 build_leftovers_ft_param(&priority, &num_entries, &num_groups); 816 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 817 break; 818 case IB_FLOW_ATTR_SNIFFER: 819 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 820 allow_sniffer_and_nic_rx_shared_tir)) 821 return ERR_PTR(-EOPNOTSUPP); 822 823 ns = mlx5_get_flow_namespace( 824 dev->mdev, ft_type == MLX5_IB_FT_RX ? 825 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 826 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 827 828 prio = &dev->flow_db->sniffer[ft_type]; 829 priority = 0; 830 num_entries = 1; 831 num_groups = 1; 832 break; 833 default: 834 break; 835 } 836 837 if (!ns) 838 return ERR_PTR(-EOPNOTSUPP); 839 840 max_table_size = min_t(int, num_entries, max_table_size); 841 842 ft = prio->flow_table; 843 if (!ft) 844 return _get_prio(ns, prio, priority, max_table_size, num_groups, 845 flags); 846 847 return prio; 848 } 849 850 static void set_underlay_qp(struct mlx5_ib_dev *dev, 851 struct mlx5_flow_spec *spec, 852 u32 underlay_qpn) 853 { 854 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 855 spec->match_criteria, 856 misc_parameters); 857 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 858 misc_parameters); 859 860 if (underlay_qpn && 861 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 862 ft_field_support.bth_dst_qp)) { 863 MLX5_SET(fte_match_set_misc, 864 misc_params_v, bth_dst_qp, underlay_qpn); 865 MLX5_SET(fte_match_set_misc, 866 misc_params_c, bth_dst_qp, 0xffffff); 867 } 868 } 869 870 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, 871 struct mlx5_flow_spec *spec, 872 struct mlx5_eswitch_rep *rep) 873 { 874 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 875 void *misc; 876 877 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 878 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 879 misc_parameters_2); 880 881 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 882 mlx5_eswitch_get_vport_metadata_for_match(rep->esw, 883 rep->vport)); 884 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 885 misc_parameters_2); 886 887 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 888 mlx5_eswitch_get_vport_metadata_mask()); 889 } else { 890 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 891 misc_parameters); 892 893 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); 894 895 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 896 misc_parameters); 897 898 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 899 } 900 } 901 902 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 903 struct mlx5_ib_flow_prio *ft_prio, 904 const struct ib_flow_attr *flow_attr, 905 struct mlx5_flow_destination *dst, 906 u32 underlay_qpn, 907 struct mlx5_ib_create_flow *ucmd) 908 { 909 struct mlx5_flow_table *ft = ft_prio->flow_table; 910 struct mlx5_ib_flow_handler *handler; 911 struct mlx5_flow_act flow_act = {}; 912 struct mlx5_flow_spec *spec; 913 struct mlx5_flow_destination dest_arr[2] = {}; 914 struct mlx5_flow_destination *rule_dst = dest_arr; 915 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 916 unsigned int spec_index; 917 u32 prev_type = 0; 918 int err = 0; 919 int dest_num = 0; 920 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 921 922 if (!is_valid_attr(dev->mdev, flow_attr)) 923 return ERR_PTR(-EINVAL); 924 925 if (dev->is_rep && is_egress) 926 return ERR_PTR(-EINVAL); 927 928 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 929 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 930 if (!handler || !spec) { 931 err = -ENOMEM; 932 goto free; 933 } 934 935 INIT_LIST_HEAD(&handler->list); 936 937 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 938 err = parse_flow_attr(dev->mdev, spec, 939 ib_flow, flow_attr, &flow_act, 940 prev_type); 941 if (err < 0) 942 goto free; 943 944 prev_type = ((union ib_flow_spec *)ib_flow)->type; 945 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 946 } 947 948 if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { 949 memcpy(&dest_arr[0], dst, sizeof(*dst)); 950 dest_num++; 951 } 952 953 if (!flow_is_multicast_only(flow_attr)) 954 set_underlay_qp(dev, spec, underlay_qpn); 955 956 if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) { 957 struct mlx5_eswitch_rep *rep; 958 959 rep = dev->port[flow_attr->port - 1].rep; 960 if (!rep) { 961 err = -EINVAL; 962 goto free; 963 } 964 965 mlx5_ib_set_rule_source_port(dev, spec, rep); 966 } 967 968 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 969 970 if (is_egress && 971 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 972 err = -EINVAL; 973 goto free; 974 } 975 976 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 977 struct mlx5_ib_mcounters *mcounters; 978 979 err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd); 980 if (err) 981 goto free; 982 983 mcounters = to_mcounters(flow_act.counters); 984 handler->ibcounters = flow_act.counters; 985 dest_arr[dest_num].type = 986 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 987 dest_arr[dest_num].counter_id = 988 mlx5_fc_id(mcounters->hw_cntrs_hndl); 989 dest_num++; 990 } 991 992 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 993 if (!dest_num) 994 rule_dst = NULL; 995 } else { 996 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) 997 flow_act.action |= 998 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 999 if (is_egress) 1000 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1001 else if (dest_num) 1002 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1003 } 1004 1005 if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && 1006 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1007 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 1008 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 1009 spec->flow_context.flow_tag, flow_attr->type); 1010 err = -EINVAL; 1011 goto free; 1012 } 1013 handler->rule = mlx5_add_flow_rules(ft, spec, 1014 &flow_act, 1015 rule_dst, dest_num); 1016 1017 if (IS_ERR(handler->rule)) { 1018 err = PTR_ERR(handler->rule); 1019 goto free; 1020 } 1021 1022 ft_prio->refcount++; 1023 handler->prio = ft_prio; 1024 handler->dev = dev; 1025 1026 ft_prio->flow_table = ft; 1027 free: 1028 if (err && handler) { 1029 mlx5_ib_counters_clear_description(handler->ibcounters); 1030 kfree(handler); 1031 } 1032 kvfree(spec); 1033 return err ? ERR_PTR(err) : handler; 1034 } 1035 1036 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 1037 struct mlx5_ib_flow_prio *ft_prio, 1038 const struct ib_flow_attr *flow_attr, 1039 struct mlx5_flow_destination *dst) 1040 { 1041 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 1042 } 1043 1044 enum { 1045 LEFTOVERS_MC, 1046 LEFTOVERS_UC, 1047 }; 1048 1049 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1050 struct mlx5_ib_flow_prio *ft_prio, 1051 struct ib_flow_attr *flow_attr, 1052 struct mlx5_flow_destination *dst) 1053 { 1054 struct mlx5_ib_flow_handler *handler_ucast = NULL; 1055 struct mlx5_ib_flow_handler *handler = NULL; 1056 1057 static struct { 1058 struct ib_flow_attr flow_attr; 1059 struct ib_flow_spec_eth eth_flow; 1060 } leftovers_specs[] = { 1061 [LEFTOVERS_MC] = { 1062 .flow_attr = { 1063 .num_of_specs = 1, 1064 .size = sizeof(leftovers_specs[0]) 1065 }, 1066 .eth_flow = { 1067 .type = IB_FLOW_SPEC_ETH, 1068 .size = sizeof(struct ib_flow_spec_eth), 1069 .mask = {.dst_mac = {0x1} }, 1070 .val = {.dst_mac = {0x1} } 1071 } 1072 }, 1073 [LEFTOVERS_UC] = { 1074 .flow_attr = { 1075 .num_of_specs = 1, 1076 .size = sizeof(leftovers_specs[0]) 1077 }, 1078 .eth_flow = { 1079 .type = IB_FLOW_SPEC_ETH, 1080 .size = sizeof(struct ib_flow_spec_eth), 1081 .mask = {.dst_mac = {0x1} }, 1082 .val = {.dst_mac = {} } 1083 } 1084 } 1085 }; 1086 1087 handler = create_flow_rule(dev, ft_prio, 1088 &leftovers_specs[LEFTOVERS_MC].flow_attr, 1089 dst); 1090 if (!IS_ERR(handler) && 1091 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 1092 handler_ucast = create_flow_rule(dev, ft_prio, 1093 &leftovers_specs[LEFTOVERS_UC].flow_attr, 1094 dst); 1095 if (IS_ERR(handler_ucast)) { 1096 mlx5_del_flow_rules(handler->rule); 1097 ft_prio->refcount--; 1098 kfree(handler); 1099 handler = handler_ucast; 1100 } else { 1101 list_add(&handler_ucast->list, &handler->list); 1102 } 1103 } 1104 1105 return handler; 1106 } 1107 1108 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 1109 struct mlx5_ib_flow_prio *ft_rx, 1110 struct mlx5_ib_flow_prio *ft_tx, 1111 struct mlx5_flow_destination *dst) 1112 { 1113 struct mlx5_ib_flow_handler *handler_rx; 1114 struct mlx5_ib_flow_handler *handler_tx; 1115 int err; 1116 static const struct ib_flow_attr flow_attr = { 1117 .num_of_specs = 0, 1118 .type = IB_FLOW_ATTR_SNIFFER, 1119 .size = sizeof(flow_attr) 1120 }; 1121 1122 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 1123 if (IS_ERR(handler_rx)) { 1124 err = PTR_ERR(handler_rx); 1125 goto err; 1126 } 1127 1128 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 1129 if (IS_ERR(handler_tx)) { 1130 err = PTR_ERR(handler_tx); 1131 goto err_tx; 1132 } 1133 1134 list_add(&handler_tx->list, &handler_rx->list); 1135 1136 return handler_rx; 1137 1138 err_tx: 1139 mlx5_del_flow_rules(handler_rx->rule); 1140 ft_rx->refcount--; 1141 kfree(handler_rx); 1142 err: 1143 return ERR_PTR(err); 1144 } 1145 1146 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 1147 struct ib_flow_attr *flow_attr, 1148 struct ib_udata *udata) 1149 { 1150 struct mlx5_ib_dev *dev = to_mdev(qp->device); 1151 struct mlx5_ib_qp *mqp = to_mqp(qp); 1152 struct mlx5_ib_flow_handler *handler = NULL; 1153 struct mlx5_flow_destination *dst = NULL; 1154 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 1155 struct mlx5_ib_flow_prio *ft_prio; 1156 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 1157 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 1158 size_t min_ucmd_sz, required_ucmd_sz; 1159 int err; 1160 int underlay_qpn; 1161 1162 if (udata && udata->inlen) { 1163 min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved); 1164 if (udata->inlen < min_ucmd_sz) 1165 return ERR_PTR(-EOPNOTSUPP); 1166 1167 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 1168 if (err) 1169 return ERR_PTR(err); 1170 1171 /* currently supports only one counters data */ 1172 if (ucmd_hdr.ncounters_data > 1) 1173 return ERR_PTR(-EINVAL); 1174 1175 required_ucmd_sz = min_ucmd_sz + 1176 sizeof(struct mlx5_ib_flow_counters_data) * 1177 ucmd_hdr.ncounters_data; 1178 if (udata->inlen > required_ucmd_sz && 1179 !ib_is_udata_cleared(udata, required_ucmd_sz, 1180 udata->inlen - required_ucmd_sz)) 1181 return ERR_PTR(-EOPNOTSUPP); 1182 1183 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 1184 if (!ucmd) 1185 return ERR_PTR(-ENOMEM); 1186 1187 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 1188 if (err) 1189 goto free_ucmd; 1190 } 1191 1192 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 1193 err = -ENOMEM; 1194 goto free_ucmd; 1195 } 1196 1197 if (flow_attr->flags & 1198 ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) { 1199 err = -EINVAL; 1200 goto free_ucmd; 1201 } 1202 1203 if (is_egress && 1204 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1205 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 1206 err = -EINVAL; 1207 goto free_ucmd; 1208 } 1209 1210 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 1211 if (!dst) { 1212 err = -ENOMEM; 1213 goto free_ucmd; 1214 } 1215 1216 mutex_lock(&dev->flow_db->lock); 1217 1218 ft_prio = get_flow_table(dev, flow_attr, 1219 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 1220 if (IS_ERR(ft_prio)) { 1221 err = PTR_ERR(ft_prio); 1222 goto unlock; 1223 } 1224 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 1225 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 1226 if (IS_ERR(ft_prio_tx)) { 1227 err = PTR_ERR(ft_prio_tx); 1228 ft_prio_tx = NULL; 1229 goto destroy_ft; 1230 } 1231 } 1232 1233 if (is_egress) { 1234 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1235 } else { 1236 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1237 if (mqp->is_rss) 1238 dst->tir_num = mqp->rss_qp.tirn; 1239 else 1240 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 1241 } 1242 1243 switch (flow_attr->type) { 1244 case IB_FLOW_ATTR_NORMAL: 1245 underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ? 1246 mqp->underlay_qpn : 1247 0; 1248 handler = _create_flow_rule(dev, ft_prio, flow_attr, dst, 1249 underlay_qpn, ucmd); 1250 break; 1251 case IB_FLOW_ATTR_ALL_DEFAULT: 1252 case IB_FLOW_ATTR_MC_DEFAULT: 1253 handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst); 1254 break; 1255 case IB_FLOW_ATTR_SNIFFER: 1256 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 1257 break; 1258 default: 1259 err = -EINVAL; 1260 goto destroy_ft; 1261 } 1262 1263 if (IS_ERR(handler)) { 1264 err = PTR_ERR(handler); 1265 handler = NULL; 1266 goto destroy_ft; 1267 } 1268 1269 mutex_unlock(&dev->flow_db->lock); 1270 kfree(dst); 1271 kfree(ucmd); 1272 1273 return &handler->ibflow; 1274 1275 destroy_ft: 1276 put_flow_table(dev, ft_prio, false); 1277 if (ft_prio_tx) 1278 put_flow_table(dev, ft_prio_tx, false); 1279 unlock: 1280 mutex_unlock(&dev->flow_db->lock); 1281 kfree(dst); 1282 free_ucmd: 1283 kfree(ucmd); 1284 return ERR_PTR(err); 1285 } 1286 1287 static struct mlx5_ib_flow_prio * 1288 _get_flow_table(struct mlx5_ib_dev *dev, 1289 struct mlx5_ib_flow_matcher *fs_matcher, 1290 bool mcast) 1291 { 1292 struct mlx5_flow_namespace *ns = NULL; 1293 struct mlx5_ib_flow_prio *prio = NULL; 1294 int max_table_size = 0; 1295 bool esw_encap; 1296 u32 flags = 0; 1297 int priority; 1298 1299 if (mcast) 1300 priority = MLX5_IB_FLOW_MCAST_PRIO; 1301 else 1302 priority = ib_prio_to_core_prio(fs_matcher->priority, false); 1303 1304 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 1305 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1306 switch (fs_matcher->ns_type) { 1307 case MLX5_FLOW_NAMESPACE_BYPASS: 1308 max_table_size = BIT( 1309 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size)); 1310 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) 1311 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 1312 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 1313 reformat_l3_tunnel_to_l2) && 1314 !esw_encap) 1315 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1316 break; 1317 case MLX5_FLOW_NAMESPACE_EGRESS: 1318 max_table_size = BIT( 1319 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); 1320 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && 1321 !esw_encap) 1322 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1323 break; 1324 case MLX5_FLOW_NAMESPACE_FDB: 1325 max_table_size = BIT( 1326 MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); 1327 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) 1328 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 1329 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, 1330 reformat_l3_tunnel_to_l2) && 1331 esw_encap) 1332 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1333 priority = FDB_BYPASS_PATH; 1334 break; 1335 case MLX5_FLOW_NAMESPACE_RDMA_RX: 1336 max_table_size = BIT( 1337 MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size)); 1338 priority = fs_matcher->priority; 1339 break; 1340 case MLX5_FLOW_NAMESPACE_RDMA_TX: 1341 max_table_size = BIT( 1342 MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size)); 1343 priority = fs_matcher->priority; 1344 break; 1345 default: 1346 break; 1347 } 1348 1349 max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); 1350 1351 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); 1352 if (!ns) 1353 return ERR_PTR(-EOPNOTSUPP); 1354 1355 switch (fs_matcher->ns_type) { 1356 case MLX5_FLOW_NAMESPACE_BYPASS: 1357 prio = &dev->flow_db->prios[priority]; 1358 break; 1359 case MLX5_FLOW_NAMESPACE_EGRESS: 1360 prio = &dev->flow_db->egress_prios[priority]; 1361 break; 1362 case MLX5_FLOW_NAMESPACE_FDB: 1363 prio = &dev->flow_db->fdb; 1364 break; 1365 case MLX5_FLOW_NAMESPACE_RDMA_RX: 1366 prio = &dev->flow_db->rdma_rx[priority]; 1367 break; 1368 case MLX5_FLOW_NAMESPACE_RDMA_TX: 1369 prio = &dev->flow_db->rdma_tx[priority]; 1370 break; 1371 default: return ERR_PTR(-EINVAL); 1372 } 1373 1374 if (!prio) 1375 return ERR_PTR(-EINVAL); 1376 1377 if (prio->flow_table) 1378 return prio; 1379 1380 return _get_prio(ns, prio, priority, max_table_size, 1381 MLX5_FS_MAX_TYPES, flags); 1382 } 1383 1384 static struct mlx5_ib_flow_handler * 1385 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 1386 struct mlx5_ib_flow_prio *ft_prio, 1387 struct mlx5_flow_destination *dst, 1388 struct mlx5_ib_flow_matcher *fs_matcher, 1389 struct mlx5_flow_context *flow_context, 1390 struct mlx5_flow_act *flow_act, 1391 void *cmd_in, int inlen, 1392 int dst_num) 1393 { 1394 struct mlx5_ib_flow_handler *handler; 1395 struct mlx5_flow_spec *spec; 1396 struct mlx5_flow_table *ft = ft_prio->flow_table; 1397 int err = 0; 1398 1399 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1400 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 1401 if (!handler || !spec) { 1402 err = -ENOMEM; 1403 goto free; 1404 } 1405 1406 INIT_LIST_HEAD(&handler->list); 1407 1408 memcpy(spec->match_value, cmd_in, inlen); 1409 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 1410 fs_matcher->mask_len); 1411 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 1412 spec->flow_context = *flow_context; 1413 1414 handler->rule = mlx5_add_flow_rules(ft, spec, 1415 flow_act, dst, dst_num); 1416 1417 if (IS_ERR(handler->rule)) { 1418 err = PTR_ERR(handler->rule); 1419 goto free; 1420 } 1421 1422 ft_prio->refcount++; 1423 handler->prio = ft_prio; 1424 handler->dev = dev; 1425 ft_prio->flow_table = ft; 1426 1427 free: 1428 if (err) 1429 kfree(handler); 1430 kvfree(spec); 1431 return err ? ERR_PTR(err) : handler; 1432 } 1433 1434 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 1435 void *match_v) 1436 { 1437 void *match_c; 1438 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 1439 void *dmac, *dmac_mask; 1440 void *ipv4, *ipv4_mask; 1441 1442 if (!(fs_matcher->match_criteria_enable & 1443 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 1444 return false; 1445 1446 match_c = fs_matcher->matcher_mask.match_params; 1447 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 1448 outer_headers); 1449 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 1450 outer_headers); 1451 1452 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 1453 dmac_47_16); 1454 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 1455 dmac_47_16); 1456 1457 if (is_multicast_ether_addr(dmac) && 1458 is_multicast_ether_addr(dmac_mask)) 1459 return true; 1460 1461 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 1462 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 1463 1464 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 1465 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 1466 1467 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 1468 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 1469 return true; 1470 1471 return false; 1472 } 1473 1474 static struct mlx5_ib_flow_handler *raw_fs_rule_add( 1475 struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, 1476 struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, 1477 u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type) 1478 { 1479 struct mlx5_flow_destination *dst; 1480 struct mlx5_ib_flow_prio *ft_prio; 1481 struct mlx5_ib_flow_handler *handler; 1482 int dst_num = 0; 1483 bool mcast; 1484 int err; 1485 1486 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 1487 return ERR_PTR(-EOPNOTSUPP); 1488 1489 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 1490 return ERR_PTR(-ENOMEM); 1491 1492 dst = kcalloc(2, sizeof(*dst), GFP_KERNEL); 1493 if (!dst) 1494 return ERR_PTR(-ENOMEM); 1495 1496 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 1497 mutex_lock(&dev->flow_db->lock); 1498 1499 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 1500 if (IS_ERR(ft_prio)) { 1501 err = PTR_ERR(ft_prio); 1502 goto unlock; 1503 } 1504 1505 switch (dest_type) { 1506 case MLX5_FLOW_DESTINATION_TYPE_TIR: 1507 dst[dst_num].type = dest_type; 1508 dst[dst_num++].tir_num = dest_id; 1509 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1510 break; 1511 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: 1512 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 1513 dst[dst_num++].ft_num = dest_id; 1514 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1515 break; 1516 case MLX5_FLOW_DESTINATION_TYPE_PORT: 1517 dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1518 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1519 break; 1520 default: 1521 break; 1522 } 1523 1524 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 1525 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 1526 dst[dst_num].counter_id = counter_id; 1527 dst_num++; 1528 } 1529 1530 handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL, 1531 fs_matcher, flow_context, flow_act, 1532 cmd_in, inlen, dst_num); 1533 1534 if (IS_ERR(handler)) { 1535 err = PTR_ERR(handler); 1536 goto destroy_ft; 1537 } 1538 1539 mutex_unlock(&dev->flow_db->lock); 1540 atomic_inc(&fs_matcher->usecnt); 1541 handler->flow_matcher = fs_matcher; 1542 1543 kfree(dst); 1544 1545 return handler; 1546 1547 destroy_ft: 1548 put_flow_table(dev, ft_prio, false); 1549 unlock: 1550 mutex_unlock(&dev->flow_db->lock); 1551 kfree(dst); 1552 1553 return ERR_PTR(err); 1554 } 1555 1556 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 1557 { 1558 u32 flags = 0; 1559 1560 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 1561 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 1562 1563 return flags; 1564 } 1565 1566 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \ 1567 MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 1568 static struct ib_flow_action * 1569 mlx5_ib_create_flow_action_esp(struct ib_device *device, 1570 const struct ib_flow_action_attrs_esp *attr, 1571 struct uverbs_attr_bundle *attrs) 1572 { 1573 struct mlx5_ib_dev *mdev = to_mdev(device); 1574 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 1575 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 1576 struct mlx5_ib_flow_action *action; 1577 u64 action_flags; 1578 u64 flags; 1579 int err = 0; 1580 1581 err = uverbs_get_flags64( 1582 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 1583 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 1584 if (err) 1585 return ERR_PTR(err); 1586 1587 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 1588 1589 /* We current only support a subset of the standard features. Only a 1590 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 1591 * (with overlap). Full offload mode isn't supported. 1592 */ 1593 if (!attr->keymat || attr->replay || attr->encap || 1594 attr->spi || attr->seq || attr->tfc_pad || 1595 attr->hard_limit_pkts || 1596 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1597 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 1598 return ERR_PTR(-EOPNOTSUPP); 1599 1600 if (attr->keymat->protocol != 1601 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 1602 return ERR_PTR(-EOPNOTSUPP); 1603 1604 aes_gcm = &attr->keymat->keymat.aes_gcm; 1605 1606 if (aes_gcm->icv_len != 16 || 1607 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 1608 return ERR_PTR(-EOPNOTSUPP); 1609 1610 action = kmalloc(sizeof(*action), GFP_KERNEL); 1611 if (!action) 1612 return ERR_PTR(-ENOMEM); 1613 1614 action->esp_aes_gcm.ib_flags = attr->flags; 1615 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 1616 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 1617 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 1618 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 1619 sizeof(accel_attrs.keymat.aes_gcm.salt)); 1620 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 1621 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 1622 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 1623 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 1624 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 1625 1626 accel_attrs.esn = attr->esn; 1627 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 1628 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 1629 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1630 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1631 1632 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 1633 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 1634 1635 action->esp_aes_gcm.ctx = 1636 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 1637 if (IS_ERR(action->esp_aes_gcm.ctx)) { 1638 err = PTR_ERR(action->esp_aes_gcm.ctx); 1639 goto err_parse; 1640 } 1641 1642 action->esp_aes_gcm.ib_flags = attr->flags; 1643 1644 return &action->ib_action; 1645 1646 err_parse: 1647 kfree(action); 1648 return ERR_PTR(err); 1649 } 1650 1651 static int 1652 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 1653 const struct ib_flow_action_attrs_esp *attr, 1654 struct uverbs_attr_bundle *attrs) 1655 { 1656 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1657 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 1658 int err = 0; 1659 1660 if (attr->keymat || attr->replay || attr->encap || 1661 attr->spi || attr->seq || attr->tfc_pad || 1662 attr->hard_limit_pkts || 1663 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1664 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 1665 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 1666 return -EOPNOTSUPP; 1667 1668 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 1669 * be modified. 1670 */ 1671 if (!(maction->esp_aes_gcm.ib_flags & 1672 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 1673 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1674 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 1675 return -EINVAL; 1676 1677 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 1678 sizeof(accel_attrs)); 1679 1680 accel_attrs.esn = attr->esn; 1681 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1682 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1683 else 1684 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1685 1686 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 1687 &accel_attrs); 1688 if (err) 1689 return err; 1690 1691 maction->esp_aes_gcm.ib_flags &= 1692 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1693 maction->esp_aes_gcm.ib_flags |= 1694 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1695 1696 return 0; 1697 } 1698 1699 static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) 1700 { 1701 switch (maction->flow_action_raw.sub_type) { 1702 case MLX5_IB_FLOW_ACTION_MODIFY_HEADER: 1703 mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev, 1704 maction->flow_action_raw.modify_hdr); 1705 break; 1706 case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT: 1707 mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev, 1708 maction->flow_action_raw.pkt_reformat); 1709 break; 1710 case MLX5_IB_FLOW_ACTION_DECAP: 1711 break; 1712 default: 1713 break; 1714 } 1715 } 1716 1717 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) 1718 { 1719 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1720 1721 switch (action->type) { 1722 case IB_FLOW_ACTION_ESP: 1723 /* 1724 * We only support aes_gcm by now, so we implicitly know this is 1725 * the underline crypto. 1726 */ 1727 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 1728 break; 1729 case IB_FLOW_ACTION_UNSPECIFIED: 1730 destroy_flow_action_raw(maction); 1731 break; 1732 default: 1733 WARN_ON(true); 1734 break; 1735 } 1736 1737 kfree(maction); 1738 return 0; 1739 } 1740 1741 static int 1742 mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, 1743 enum mlx5_flow_namespace_type *namespace) 1744 { 1745 switch (table_type) { 1746 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX: 1747 *namespace = MLX5_FLOW_NAMESPACE_BYPASS; 1748 break; 1749 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX: 1750 *namespace = MLX5_FLOW_NAMESPACE_EGRESS; 1751 break; 1752 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: 1753 *namespace = MLX5_FLOW_NAMESPACE_FDB; 1754 break; 1755 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: 1756 *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; 1757 break; 1758 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX: 1759 *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX; 1760 break; 1761 default: 1762 return -EINVAL; 1763 } 1764 1765 return 0; 1766 } 1767 1768 static const struct uverbs_attr_spec mlx5_ib_flow_type[] = { 1769 [MLX5_IB_FLOW_TYPE_NORMAL] = { 1770 .type = UVERBS_ATTR_TYPE_PTR_IN, 1771 .u.ptr = { 1772 .len = sizeof(u16), /* data is priority */ 1773 .min_len = sizeof(u16), 1774 } 1775 }, 1776 [MLX5_IB_FLOW_TYPE_SNIFFER] = { 1777 .type = UVERBS_ATTR_TYPE_PTR_IN, 1778 UVERBS_ATTR_NO_DATA(), 1779 }, 1780 [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = { 1781 .type = UVERBS_ATTR_TYPE_PTR_IN, 1782 UVERBS_ATTR_NO_DATA(), 1783 }, 1784 [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = { 1785 .type = UVERBS_ATTR_TYPE_PTR_IN, 1786 UVERBS_ATTR_NO_DATA(), 1787 }, 1788 }; 1789 1790 static bool is_flow_dest(void *obj, int *dest_id, int *dest_type) 1791 { 1792 struct devx_obj *devx_obj = obj; 1793 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); 1794 1795 switch (opcode) { 1796 case MLX5_CMD_OP_DESTROY_TIR: 1797 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1798 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, 1799 obj_id); 1800 return true; 1801 1802 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 1803 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1804 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox, 1805 table_id); 1806 return true; 1807 default: 1808 return false; 1809 } 1810 } 1811 1812 static int get_dests(struct uverbs_attr_bundle *attrs, 1813 struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id, 1814 int *dest_type, struct ib_qp **qp, u32 *flags) 1815 { 1816 bool dest_devx, dest_qp; 1817 void *devx_obj; 1818 int err; 1819 1820 dest_devx = uverbs_attr_is_valid(attrs, 1821 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); 1822 dest_qp = uverbs_attr_is_valid(attrs, 1823 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP); 1824 1825 *flags = 0; 1826 err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS, 1827 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS | 1828 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP); 1829 if (err) 1830 return err; 1831 1832 /* Both flags are not allowed */ 1833 if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS && 1834 *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP) 1835 return -EINVAL; 1836 1837 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 1838 if (dest_devx && (dest_qp || *flags)) 1839 return -EINVAL; 1840 else if (dest_qp && *flags) 1841 return -EINVAL; 1842 } 1843 1844 /* Allow only DEVX object, drop as dest for FDB */ 1845 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx || 1846 (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP))) 1847 return -EINVAL; 1848 1849 /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ 1850 if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && 1851 ((!dest_devx && !dest_qp) || (dest_devx && dest_qp))) 1852 return -EINVAL; 1853 1854 *qp = NULL; 1855 if (dest_devx) { 1856 devx_obj = 1857 uverbs_attr_get_obj(attrs, 1858 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); 1859 1860 /* Verify that the given DEVX object is a flow 1861 * steering destination. 1862 */ 1863 if (!is_flow_dest(devx_obj, dest_id, dest_type)) 1864 return -EINVAL; 1865 /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ 1866 if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || 1867 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && 1868 *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) 1869 return -EINVAL; 1870 } else if (dest_qp) { 1871 struct mlx5_ib_qp *mqp; 1872 1873 *qp = uverbs_attr_get_obj(attrs, 1874 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP); 1875 if (IS_ERR(*qp)) 1876 return PTR_ERR(*qp); 1877 1878 if ((*qp)->qp_type != IB_QPT_RAW_PACKET) 1879 return -EINVAL; 1880 1881 mqp = to_mqp(*qp); 1882 if (mqp->is_rss) 1883 *dest_id = mqp->rss_qp.tirn; 1884 else 1885 *dest_id = mqp->raw_packet_qp.rq.tirn; 1886 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1887 } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS || 1888 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) && 1889 !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) { 1890 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1891 } 1892 1893 if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR && 1894 (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS || 1895 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)) 1896 return -EINVAL; 1897 1898 return 0; 1899 } 1900 1901 static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id) 1902 { 1903 struct devx_obj *devx_obj = obj; 1904 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); 1905 1906 if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) { 1907 1908 if (offset && offset >= devx_obj->flow_counter_bulk_size) 1909 return false; 1910 1911 *counter_id = MLX5_GET(dealloc_flow_counter_in, 1912 devx_obj->dinbox, 1913 flow_counter_id); 1914 *counter_id += offset; 1915 return true; 1916 } 1917 1918 return false; 1919 } 1920 1921 #define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2 1922 static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( 1923 struct uverbs_attr_bundle *attrs) 1924 { 1925 struct mlx5_flow_context flow_context = {.flow_tag = 1926 MLX5_FS_DEFAULT_FLOW_TAG}; 1927 u32 *offset_attr, offset = 0, counter_id = 0; 1928 int dest_id, dest_type = -1, inlen, len, ret, i; 1929 struct mlx5_ib_flow_handler *flow_handler; 1930 struct mlx5_ib_flow_matcher *fs_matcher; 1931 struct ib_uobject **arr_flow_actions; 1932 struct ib_uflow_resources *uflow_res; 1933 struct mlx5_flow_act flow_act = {}; 1934 struct ib_qp *qp = NULL; 1935 void *devx_obj, *cmd_in; 1936 struct ib_uobject *uobj; 1937 struct mlx5_ib_dev *dev; 1938 u32 flags; 1939 1940 if (!capable(CAP_NET_RAW)) 1941 return -EPERM; 1942 1943 fs_matcher = uverbs_attr_get_obj(attrs, 1944 MLX5_IB_ATTR_CREATE_FLOW_MATCHER); 1945 uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE); 1946 dev = mlx5_udata_to_mdev(&attrs->driver_udata); 1947 1948 if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags)) 1949 return -EINVAL; 1950 1951 if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS) 1952 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; 1953 1954 if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP) 1955 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 1956 1957 len = uverbs_attr_get_uobjs_arr(attrs, 1958 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions); 1959 if (len) { 1960 devx_obj = arr_flow_actions[0]->object; 1961 1962 if (uverbs_attr_is_valid(attrs, 1963 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) { 1964 1965 int num_offsets = uverbs_attr_ptr_get_array_size( 1966 attrs, 1967 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET, 1968 sizeof(u32)); 1969 1970 if (num_offsets != 1) 1971 return -EINVAL; 1972 1973 offset_attr = uverbs_attr_get_alloced_ptr( 1974 attrs, 1975 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET); 1976 offset = *offset_attr; 1977 } 1978 1979 if (!is_flow_counter(devx_obj, offset, &counter_id)) 1980 return -EINVAL; 1981 1982 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 1983 } 1984 1985 cmd_in = uverbs_attr_get_alloced_ptr( 1986 attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE); 1987 inlen = uverbs_attr_get_len(attrs, 1988 MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE); 1989 1990 uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS); 1991 if (!uflow_res) 1992 return -ENOMEM; 1993 1994 len = uverbs_attr_get_uobjs_arr(attrs, 1995 MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions); 1996 for (i = 0; i < len; i++) { 1997 struct mlx5_ib_flow_action *maction = 1998 to_mflow_act(arr_flow_actions[i]->object); 1999 2000 ret = parse_flow_flow_action(maction, false, &flow_act); 2001 if (ret) 2002 goto err_out; 2003 flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE, 2004 arr_flow_actions[i]->object); 2005 } 2006 2007 ret = uverbs_copy_from(&flow_context.flow_tag, attrs, 2008 MLX5_IB_ATTR_CREATE_FLOW_TAG); 2009 if (!ret) { 2010 if (flow_context.flow_tag >= BIT(24)) { 2011 ret = -EINVAL; 2012 goto err_out; 2013 } 2014 flow_context.flags |= FLOW_CONTEXT_HAS_TAG; 2015 } 2016 2017 flow_handler = 2018 raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act, 2019 counter_id, cmd_in, inlen, dest_id, dest_type); 2020 if (IS_ERR(flow_handler)) { 2021 ret = PTR_ERR(flow_handler); 2022 goto err_out; 2023 } 2024 2025 ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res); 2026 2027 return 0; 2028 err_out: 2029 ib_uverbs_flow_resources_free(uflow_res); 2030 return ret; 2031 } 2032 2033 static int flow_matcher_cleanup(struct ib_uobject *uobject, 2034 enum rdma_remove_reason why, 2035 struct uverbs_attr_bundle *attrs) 2036 { 2037 struct mlx5_ib_flow_matcher *obj = uobject->object; 2038 2039 if (atomic_read(&obj->usecnt)) 2040 return -EBUSY; 2041 2042 kfree(obj); 2043 return 0; 2044 } 2045 2046 static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, 2047 struct mlx5_ib_flow_matcher *obj) 2048 { 2049 enum mlx5_ib_uapi_flow_table_type ft_type = 2050 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX; 2051 u32 flags; 2052 int err; 2053 2054 /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older 2055 * users should switch to it. We leave this to not break userspace 2056 */ 2057 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) && 2058 uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) 2059 return -EINVAL; 2060 2061 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) { 2062 err = uverbs_get_const(&ft_type, attrs, 2063 MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE); 2064 if (err) 2065 return err; 2066 2067 err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type); 2068 if (err) 2069 return err; 2070 2071 return 0; 2072 } 2073 2074 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) { 2075 err = uverbs_get_flags32(&flags, attrs, 2076 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS, 2077 IB_FLOW_ATTR_FLAGS_EGRESS); 2078 if (err) 2079 return err; 2080 2081 if (flags) { 2082 mlx5_ib_ft_type_to_namespace( 2083 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, 2084 &obj->ns_type); 2085 return 0; 2086 } 2087 } 2088 2089 obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS; 2090 2091 return 0; 2092 } 2093 2094 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)( 2095 struct uverbs_attr_bundle *attrs) 2096 { 2097 struct ib_uobject *uobj = uverbs_attr_get_uobject( 2098 attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE); 2099 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); 2100 struct mlx5_ib_flow_matcher *obj; 2101 int err; 2102 2103 obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL); 2104 if (!obj) 2105 return -ENOMEM; 2106 2107 obj->mask_len = uverbs_attr_get_len( 2108 attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK); 2109 err = uverbs_copy_from(&obj->matcher_mask, 2110 attrs, 2111 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK); 2112 if (err) 2113 goto end; 2114 2115 obj->flow_type = uverbs_attr_get_enum_id( 2116 attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE); 2117 2118 if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) { 2119 err = uverbs_copy_from(&obj->priority, 2120 attrs, 2121 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE); 2122 if (err) 2123 goto end; 2124 } 2125 2126 err = uverbs_copy_from(&obj->match_criteria_enable, 2127 attrs, 2128 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA); 2129 if (err) 2130 goto end; 2131 2132 err = mlx5_ib_matcher_ns(attrs, obj); 2133 if (err) 2134 goto end; 2135 2136 if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB && 2137 mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) { 2138 err = -EINVAL; 2139 goto end; 2140 } 2141 2142 uobj->object = obj; 2143 obj->mdev = dev->mdev; 2144 atomic_set(&obj->usecnt, 0); 2145 return 0; 2146 2147 end: 2148 kfree(obj); 2149 return err; 2150 } 2151 2152 static struct ib_flow_action * 2153 mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev, 2154 enum mlx5_ib_uapi_flow_table_type ft_type, 2155 u8 num_actions, void *in) 2156 { 2157 enum mlx5_flow_namespace_type namespace; 2158 struct mlx5_ib_flow_action *maction; 2159 int ret; 2160 2161 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace); 2162 if (ret) 2163 return ERR_PTR(-EINVAL); 2164 2165 maction = kzalloc(sizeof(*maction), GFP_KERNEL); 2166 if (!maction) 2167 return ERR_PTR(-ENOMEM); 2168 2169 maction->flow_action_raw.modify_hdr = 2170 mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in); 2171 2172 if (IS_ERR(maction->flow_action_raw.modify_hdr)) { 2173 ret = PTR_ERR(maction->flow_action_raw.modify_hdr); 2174 kfree(maction); 2175 return ERR_PTR(ret); 2176 } 2177 maction->flow_action_raw.sub_type = 2178 MLX5_IB_FLOW_ACTION_MODIFY_HEADER; 2179 maction->flow_action_raw.dev = dev; 2180 2181 return &maction->ib_action; 2182 } 2183 2184 static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev) 2185 { 2186 return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 2187 max_modify_header_actions) || 2188 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, 2189 max_modify_header_actions) || 2190 MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, 2191 max_modify_header_actions); 2192 } 2193 2194 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)( 2195 struct uverbs_attr_bundle *attrs) 2196 { 2197 struct ib_uobject *uobj = uverbs_attr_get_uobject( 2198 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE); 2199 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); 2200 enum mlx5_ib_uapi_flow_table_type ft_type; 2201 struct ib_flow_action *action; 2202 int num_actions; 2203 void *in; 2204 int ret; 2205 2206 if (!mlx5_ib_modify_header_supported(mdev)) 2207 return -EOPNOTSUPP; 2208 2209 in = uverbs_attr_get_alloced_ptr(attrs, 2210 MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM); 2211 2212 num_actions = uverbs_attr_ptr_get_array_size( 2213 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, 2214 MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)); 2215 if (num_actions < 0) 2216 return num_actions; 2217 2218 ret = uverbs_get_const(&ft_type, attrs, 2219 MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE); 2220 if (ret) 2221 return ret; 2222 action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in); 2223 if (IS_ERR(action)) 2224 return PTR_ERR(action); 2225 2226 uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev, 2227 IB_FLOW_ACTION_UNSPECIFIED); 2228 2229 return 0; 2230 } 2231 2232 static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev, 2233 u8 packet_reformat_type, 2234 u8 ft_type) 2235 { 2236 switch (packet_reformat_type) { 2237 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL: 2238 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX) 2239 return MLX5_CAP_FLOWTABLE(ibdev->mdev, 2240 encap_general_header); 2241 break; 2242 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL: 2243 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX) 2244 return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev, 2245 reformat_l2_to_l3_tunnel); 2246 break; 2247 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2: 2248 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX) 2249 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, 2250 reformat_l3_tunnel_to_l2); 2251 break; 2252 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2: 2253 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX) 2254 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap); 2255 break; 2256 default: 2257 break; 2258 } 2259 2260 return false; 2261 } 2262 2263 static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt) 2264 { 2265 switch (dv_prt) { 2266 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL: 2267 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL; 2268 break; 2269 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2: 2270 *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; 2271 break; 2272 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL: 2273 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 2274 break; 2275 default: 2276 return -EINVAL; 2277 } 2278 2279 return 0; 2280 } 2281 2282 static int mlx5_ib_flow_action_create_packet_reformat_ctx( 2283 struct mlx5_ib_dev *dev, 2284 struct mlx5_ib_flow_action *maction, 2285 u8 ft_type, u8 dv_prt, 2286 void *in, size_t len) 2287 { 2288 struct mlx5_pkt_reformat_params reformat_params; 2289 enum mlx5_flow_namespace_type namespace; 2290 u8 prm_prt; 2291 int ret; 2292 2293 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace); 2294 if (ret) 2295 return ret; 2296 2297 ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt); 2298 if (ret) 2299 return ret; 2300 2301 memset(&reformat_params, 0, sizeof(reformat_params)); 2302 reformat_params.type = prm_prt; 2303 reformat_params.size = len; 2304 reformat_params.data = in; 2305 maction->flow_action_raw.pkt_reformat = 2306 mlx5_packet_reformat_alloc(dev->mdev, &reformat_params, 2307 namespace); 2308 if (IS_ERR(maction->flow_action_raw.pkt_reformat)) { 2309 ret = PTR_ERR(maction->flow_action_raw.pkt_reformat); 2310 return ret; 2311 } 2312 2313 maction->flow_action_raw.sub_type = 2314 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT; 2315 maction->flow_action_raw.dev = dev; 2316 2317 return 0; 2318 } 2319 2320 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)( 2321 struct uverbs_attr_bundle *attrs) 2322 { 2323 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, 2324 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE); 2325 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); 2326 enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt; 2327 enum mlx5_ib_uapi_flow_table_type ft_type; 2328 struct mlx5_ib_flow_action *maction; 2329 int ret; 2330 2331 ret = uverbs_get_const(&ft_type, attrs, 2332 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE); 2333 if (ret) 2334 return ret; 2335 2336 ret = uverbs_get_const(&dv_prt, attrs, 2337 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE); 2338 if (ret) 2339 return ret; 2340 2341 if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type)) 2342 return -EOPNOTSUPP; 2343 2344 maction = kzalloc(sizeof(*maction), GFP_KERNEL); 2345 if (!maction) 2346 return -ENOMEM; 2347 2348 if (dv_prt == 2349 MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) { 2350 maction->flow_action_raw.sub_type = 2351 MLX5_IB_FLOW_ACTION_DECAP; 2352 maction->flow_action_raw.dev = mdev; 2353 } else { 2354 void *in; 2355 int len; 2356 2357 in = uverbs_attr_get_alloced_ptr(attrs, 2358 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF); 2359 if (IS_ERR(in)) { 2360 ret = PTR_ERR(in); 2361 goto free_maction; 2362 } 2363 2364 len = uverbs_attr_get_len(attrs, 2365 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF); 2366 2367 ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev, 2368 maction, ft_type, dv_prt, in, len); 2369 if (ret) 2370 goto free_maction; 2371 } 2372 2373 uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev, 2374 IB_FLOW_ACTION_UNSPECIFIED); 2375 return 0; 2376 2377 free_maction: 2378 kfree(maction); 2379 return ret; 2380 } 2381 2382 DECLARE_UVERBS_NAMED_METHOD( 2383 MLX5_IB_METHOD_CREATE_FLOW, 2384 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE, 2385 UVERBS_OBJECT_FLOW, 2386 UVERBS_ACCESS_NEW, 2387 UA_MANDATORY), 2388 UVERBS_ATTR_PTR_IN( 2389 MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, 2390 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)), 2391 UA_MANDATORY, 2392 UA_ALLOC_AND_COPY), 2393 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER, 2394 MLX5_IB_OBJECT_FLOW_MATCHER, 2395 UVERBS_ACCESS_READ, 2396 UA_MANDATORY), 2397 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP, 2398 UVERBS_OBJECT_QP, 2399 UVERBS_ACCESS_READ), 2400 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, 2401 MLX5_IB_OBJECT_DEVX_OBJ, 2402 UVERBS_ACCESS_READ), 2403 UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, 2404 UVERBS_OBJECT_FLOW_ACTION, 2405 UVERBS_ACCESS_READ, 1, 2406 MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS, 2407 UA_OPTIONAL), 2408 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG, 2409 UVERBS_ATTR_TYPE(u32), 2410 UA_OPTIONAL), 2411 UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, 2412 MLX5_IB_OBJECT_DEVX_OBJ, 2413 UVERBS_ACCESS_READ, 1, 1, 2414 UA_OPTIONAL), 2415 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET, 2416 UVERBS_ATTR_MIN_SIZE(sizeof(u32)), 2417 UA_OPTIONAL, 2418 UA_ALLOC_AND_COPY), 2419 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS, 2420 enum mlx5_ib_create_flow_flags, 2421 UA_OPTIONAL)); 2422 2423 DECLARE_UVERBS_NAMED_METHOD_DESTROY( 2424 MLX5_IB_METHOD_DESTROY_FLOW, 2425 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE, 2426 UVERBS_OBJECT_FLOW, 2427 UVERBS_ACCESS_DESTROY, 2428 UA_MANDATORY)); 2429 2430 ADD_UVERBS_METHODS(mlx5_ib_fs, 2431 UVERBS_OBJECT_FLOW, 2432 &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW), 2433 &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW)); 2434 2435 DECLARE_UVERBS_NAMED_METHOD( 2436 MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER, 2437 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE, 2438 UVERBS_OBJECT_FLOW_ACTION, 2439 UVERBS_ACCESS_NEW, 2440 UA_MANDATORY), 2441 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, 2442 UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES( 2443 set_add_copy_action_in_auto)), 2444 UA_MANDATORY, 2445 UA_ALLOC_AND_COPY), 2446 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE, 2447 enum mlx5_ib_uapi_flow_table_type, 2448 UA_MANDATORY)); 2449 2450 DECLARE_UVERBS_NAMED_METHOD( 2451 MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT, 2452 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE, 2453 UVERBS_OBJECT_FLOW_ACTION, 2454 UVERBS_ACCESS_NEW, 2455 UA_MANDATORY), 2456 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF, 2457 UVERBS_ATTR_MIN_SIZE(1), 2458 UA_ALLOC_AND_COPY, 2459 UA_OPTIONAL), 2460 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE, 2461 enum mlx5_ib_uapi_flow_action_packet_reformat_type, 2462 UA_MANDATORY), 2463 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE, 2464 enum mlx5_ib_uapi_flow_table_type, 2465 UA_MANDATORY)); 2466 2467 ADD_UVERBS_METHODS( 2468 mlx5_ib_flow_actions, 2469 UVERBS_OBJECT_FLOW_ACTION, 2470 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER), 2471 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)); 2472 2473 DECLARE_UVERBS_NAMED_METHOD( 2474 MLX5_IB_METHOD_FLOW_MATCHER_CREATE, 2475 UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE, 2476 MLX5_IB_OBJECT_FLOW_MATCHER, 2477 UVERBS_ACCESS_NEW, 2478 UA_MANDATORY), 2479 UVERBS_ATTR_PTR_IN( 2480 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, 2481 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)), 2482 UA_MANDATORY), 2483 UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, 2484 mlx5_ib_flow_type, 2485 UA_MANDATORY), 2486 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, 2487 UVERBS_ATTR_TYPE(u8), 2488 UA_MANDATORY), 2489 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS, 2490 enum ib_flow_flags, 2491 UA_OPTIONAL), 2492 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE, 2493 enum mlx5_ib_uapi_flow_table_type, 2494 UA_OPTIONAL)); 2495 2496 DECLARE_UVERBS_NAMED_METHOD_DESTROY( 2497 MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, 2498 UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE, 2499 MLX5_IB_OBJECT_FLOW_MATCHER, 2500 UVERBS_ACCESS_DESTROY, 2501 UA_MANDATORY)); 2502 2503 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER, 2504 UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup), 2505 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE), 2506 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY)); 2507 2508 const struct uapi_definition mlx5_ib_flow_defs[] = { 2509 UAPI_DEF_CHAIN_OBJ_TREE_NAMED( 2510 MLX5_IB_OBJECT_FLOW_MATCHER), 2511 UAPI_DEF_CHAIN_OBJ_TREE( 2512 UVERBS_OBJECT_FLOW, 2513 &mlx5_ib_fs), 2514 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 2515 &mlx5_ib_flow_actions), 2516 {}, 2517 }; 2518 2519 static const struct ib_device_ops flow_ops = { 2520 .create_flow = mlx5_ib_create_flow, 2521 .destroy_flow = mlx5_ib_destroy_flow, 2522 .destroy_flow_action = mlx5_ib_destroy_flow_action, 2523 }; 2524 2525 static const struct ib_device_ops flow_ipsec_ops = { 2526 .create_flow_action_esp = mlx5_ib_create_flow_action_esp, 2527 .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, 2528 }; 2529 2530 int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) 2531 { 2532 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); 2533 2534 if (!dev->flow_db) 2535 return -ENOMEM; 2536 2537 mutex_init(&dev->flow_db->lock); 2538 2539 ib_set_device_ops(&dev->ib_dev, &flow_ops); 2540 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 2541 MLX5_ACCEL_IPSEC_CAP_DEVICE) 2542 ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops); 2543 2544 return 0; 2545 } 2546