// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
#include "fs.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT,
	MATCH_CRITERIA_ENABLE_MISC2_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				      \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))	      \

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MATCH_CRITERIA_ENABLE_MISC2_BIT;

	return match_criteria_enable;
}

static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	u8 entry_mask;
	u8 entry_val;
	int err = 0;

	if (!mask)
		goto out;

	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, ip_protocol);
	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, ip_protocol);
	if (!entry_mask) {
		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
		goto out;
	}
	/* Don't override existing ip protocol */
	if (mask != entry_mask || val != entry_val)
		err = -EINVAL;
out:
	return err;
}

static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc, misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
		return -EOPNOTSUPP;

	return 0;
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)				\
	memchr_inv((void *)&filter.field + sizeof(filter.field), 0,	\
		   sizeof(filter) - offsetofend(typeof(filter), field))

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action)
{

	switch (maction->ib_action.type) {
	case IB_FLOW_ACTION_ESP:
		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
			return -EINVAL;
		/* Currently only AES_GCM keymat is supported by the driver */
		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
		action->action |= is_egress ?
			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
		return 0;
	case IB_FLOW_ACTION_UNSPECIFIED:
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			action->modify_hdr =
				maction->flow_action_raw.modify_hdr;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_DECAP) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
			if (action->action &
			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
				return -EINVAL;
			action->action |=
				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			action->pkt_reformat =
				maction->flow_action_raw.pkt_reformat;
			return 0;
		}
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

static int parse_flow_attr(struct mlx5_core_dev *mdev,
			   struct mlx5_flow_spec *spec,
			   const union ib_flow_spec *ib_spec,
			   const struct ib_flow_attr *flow_attr,
			   struct mlx5_flow_act *action, u32 prev_type)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	u32 *match_c = spec->match_criteria;
	u32 *match_v = spec->match_value;
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
					    misc_parameters_2);
	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
					    misc_parameters_2);
	void *headers_c;
	void *headers_v;
	int match_ipv;
	int ret;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv4.mask.proto,
			      ib_spec->ipv4.val.proto))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV6_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IPV6);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv6.mask.next_hdr,
			      ib_spec->ipv6.val.next_hdr))
			return -EINVAL;

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);
		break;
	case IB_FLOW_SPEC_ESP:
		if (ib_spec->esp.mask.seq)
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
			 ntohl(ib_spec->esp.mask.spi));
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
			 ntohl(ib_spec->esp.val.spi));
		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_GRE:
		if (ib_spec->gre.mask.c_ks_res0_ver)
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_GRE);

		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
			 ntohs(ib_spec->gre.mask.protocol));
		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
			 ntohs(ib_spec->gre.val.protocol));

		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.mask.key,
		       sizeof(ib_spec->gre.mask.key));
		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.val.key,
		       sizeof(ib_spec->gre.val.key));
		break;
	case IB_FLOW_SPEC_MPLS:
		switch (prev_type) {
		case IB_FLOW_SPEC_UDP:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_udp),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		case IB_FLOW_SPEC_GRE:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_gre),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		default:
			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.inner_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    inner_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    inner_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			} else {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.outer_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    outer_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    outer_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			}
		}
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
					 LAST_DROP_FIELD))
			return -EOPNOTSUPP;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
		if (ret)
			return ret;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
					 LAST_COUNTERS_FIELD))
			return -EOPNOTSUPP;

		/* for now support only one counters spec per flow */
		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
			return -EINVAL;

		action->counters = ib_spec->flow_count.counters;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

enum valid_spec {
	VALID_SPEC_INVALID,
	VALID_SPEC_VALID,
	VALID_SPEC_NA,
};

static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
		     const struct mlx5_flow_spec *spec,
		     const struct mlx5_flow_act *flow_act,
		     bool egress)
{
	const u32 *match_c = spec->match_criteria;
	bool is_crypto =
		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

	/*
	 * Currently only crypto is supported in egress; when regular egress
	 * rules are supported, always return VALID_SPEC_NA.
	 */
	if (!is_crypto)
		return VALID_SPEC_NA;

	return is_crypto && is_ipsec &&
		(!egress || (!is_drop &&
			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
		VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
			  const struct mlx5_flow_spec *spec,
			  const struct mlx5_flow_act *flow_act,
			  bool egress)
{
	/* We currently only support ipsec egress flows */
	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
	bool ipv4_spec_valid, ipv6_spec_valid;
	unsigned int ip_spec_type = 0;
	bool has_ethertype = false;
	unsigned int spec_index;
	bool mask_valid = true;
	u16 eth_type = 0;
	bool type_valid;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
		    ib_spec->eth.mask.ether_type) {
			mask_valid = (ib_spec->eth.mask.ether_type ==
				      htons(0xffff));
			has_ethertype = true;
			eth_type = ntohs(ib_spec->eth.val.ether_type);
		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
			ip_spec_type = ib_spec->type;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}

	type_valid = (!has_ethertype) || (!ip_spec_type);
	if (!type_valid && mask_valid) {
		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
			     (((eth_type == ETH_P_MPLS_UC) ||
			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
	}

	return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
			  const struct ib_flow_attr *flow_attr)
{
	return is_valid_ethertype(mdev, flow_attr, false) &&
	       is_valid_ethertype(mdev, flow_attr, true);
}

static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							     struct mlx5_ib_flow_handler,
							     ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;
	struct mlx5_ib_dev *dev = handler->dev;

	mutex_lock(&dev->flow_db->lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mlx5_ib_counters_clear_description(handler->ibcounters);
	mutex_unlock(&dev->flow_db->lock);
	if (handler->flow_matcher)
		atomic_dec(&handler->flow_matcher->usecnt);
	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 6
#define MLX5_FS_MAX_ENTRIES	 BIT(16)

static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
					   struct mlx5_ib_flow_prio *prio,
					   int priority,
					   int num_entries, int num_groups,
					   u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.prio = priority;
	ft_attr.max_fte = num_entries;
	ft_attr.flags = flags;
	ft_attr.autogroup.max_num_groups = num_groups;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	prio->flow_table = ft;
	prio->refcount = 0;
	return prio;
}

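/*
 * Pick the flow table that should hold rules for the given verbs flow
 * attribute: normal, leftovers (ALL/MC default) and sniffer attributes each
 * map to their own namespace and priority slot, and the table itself is
 * created lazily through _get_prio() on first use.
 */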
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	enum mlx5_flow_namespace_type fn_type;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		if (flow_is_multicast_only(flow_attr) && !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		if (ft_type == MLX5_IB_FT_RX) {
			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
			prio = &dev->flow_db->prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					reformat_l3_tunnel_to_l2))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		} else {
			max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
						dev->mdev, log_max_ft_size));
			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
			prio = &dev->flow_db->egress_prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		}
		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
	case IB_FLOW_ATTR_MC_DEFAULT:
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority, &num_entries, &num_groups);
		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
		break;
	case IB_FLOW_ATTR_SNIFFER:
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-EOPNOTSUPP);

		ns = mlx5_get_flow_namespace(
			dev->mdev, ft_type == MLX5_IB_FT_RX ?
				   MLX5_FLOW_NAMESPACE_SNIFFER_RX :
				   MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db->sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
		break;
	default:
		break;
	}

	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	max_table_size = min_t(int, num_entries, max_table_size);

	ft = prio->flow_table;
	if (!ft)
		return _get_prio(ns, prio, priority, max_table_size, num_groups,
				 flags);

	return prio;
}

static void set_underlay_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_flow_spec *spec,
			    u32 underlay_qpn)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
					   spec->match_criteria,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);

	if (underlay_qpn &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
				      ft_field_support.bth_dst_qp)) {
		MLX5_SET(fte_match_set_misc,
			 misc_params_v, bth_dst_qp, underlay_qpn);
		MLX5_SET(fte_match_set_misc,
			 misc_params_c, bth_dst_qp, 0xffffff);
	}
}

static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
					 struct mlx5_flow_spec *spec,
					 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);

		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw,
								   rep->vport));
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);

		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);

		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	}
}

static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
						      struct mlx5_ib_flow_prio *ft_prio,
						      const struct ib_flow_attr *flow_attr,
						      struct mlx5_flow_destination *dst,
						      u32 underlay_qpn,
						      struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_destination dest_arr[2] = {};
	struct mlx5_flow_destination *rule_dst = dest_arr;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 prev_type = 0;
	int err = 0;
	int dest_num = 0;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;

	if (!is_valid_attr(dev->mdev, flow_attr))
		return ERR_PTR(-EINVAL);

	if (dev->is_rep && is_egress)
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(dev->mdev, spec,
				      ib_flow, flow_attr, &flow_act,
				      prev_type);
		if (err < 0)
			goto free;

		prev_type = ((union ib_flow_spec *)ib_flow)->type;
		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
		memcpy(&dest_arr[0], dst, sizeof(*dst));
		dest_num++;
	}

	if (!flow_is_multicast_only(flow_attr))
		set_underlay_qp(dev, spec, underlay_qpn);

	if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
		struct mlx5_eswitch_rep *rep;

		rep = dev->port[flow_attr->port - 1].rep;
		if (!rep) {
			err = -EINVAL;
			goto free;
		}

		mlx5_ib_set_rule_source_port(dev, spec, rep);
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);

	if (is_egress &&
	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
		err = -EINVAL;
		goto free;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		struct mlx5_ib_mcounters *mcounters;

		err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
		if (err)
			goto free;

		mcounters = to_mcounters(flow_act.counters);
		handler->ibcounters = flow_act.counters;
		dest_arr[dest_num].type =
			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest_arr[dest_num].counter_id =
			mlx5_fc_id(mcounters->hw_cntrs_hndl);
		dest_num++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		if (!dest_num)
			rule_dst = NULL;
	} else {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
			flow_act.action |=
				MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		if (is_egress)
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		else if (dest_num)
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
			     spec->flow_context.flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}
	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    rule_dst, dest_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;

	ft_prio->flow_table = ft;
free:
	if (err && handler) {
		mlx5_ib_counters_clear_description(handler->ibcounters);
		kfree(handler);
	}
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}
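
/*
 * Convenience wrapper around _create_flow_rule() for callers that have no
 * underlay QPN and no user command data.
 */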
static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}

enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val =  {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}

static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							 struct mlx5_ib_flow_prio *ft_rx,
							 struct mlx5_ib_flow_prio *ft_tx,
							 struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr = {
		.num_of_specs = 0,
		.type = IB_FLOW_ATTR_SNIFFER,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}

static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
	size_t min_ucmd_sz, required_ucmd_sz;
	int err;
	int underlay_qpn;

	if (udata && udata->inlen) {
		min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
		if (udata->inlen < min_ucmd_sz)
			return ERR_PTR(-EOPNOTSUPP);

		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
		if (err)
			return ERR_PTR(err);

		/* currently supports only one counters data */
		if (ucmd_hdr.ncounters_data > 1)
			return ERR_PTR(-EINVAL);

		required_ucmd_sz = min_ucmd_sz +
			sizeof(struct mlx5_ib_flow_counters_data) *
			ucmd_hdr.ncounters_data;
		if (udata->inlen > required_ucmd_sz &&
		    !ib_is_udata_cleared(udata, required_ucmd_sz,
					 udata->inlen - required_ucmd_sz))
			return ERR_PTR(-EOPNOTSUPP);

		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
		if (!ucmd)
			return ERR_PTR(-ENOMEM);

		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
		if (err)
			goto free_ucmd;
	}

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	if (flow_attr->port > dev->num_ports ||
	    (flow_attr->flags &
	     ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
		err = -EINVAL;
		goto free_ucmd;
	}

	if (is_egress &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		err = -EINVAL;
		goto free_ucmd;
	}

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	mutex_lock(&dev->flow_db->lock);

	ft_prio = get_flow_table(dev, flow_attr,
				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	if (is_egress) {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	} else {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		if (mqp->is_rss)
			dst->tir_num = mqp->rss_qp.tirn;
		else
			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
				       mqp->underlay_qpn :
				       0;
		handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
					    underlay_qpn, ucmd);
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
	case IB_FLOW_ATTR_MC_DEFAULT:
		handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
		break;
	case IB_FLOW_ATTR_SNIFFER:
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
		break;
	default:
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
	kfree(ucmd);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
free_ucmd:
	kfree(ucmd);
	return ERR_PTR(err);
}

static struct mlx5_ib_flow_prio *
_get_flow_table(struct mlx5_ib_dev *dev,
		struct mlx5_ib_flow_matcher *fs_matcher,
		bool mcast)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio = NULL;
	int max_table_size = 0;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	if (mcast)
		priority = MLX5_IB_FLOW_MCAST_PRIO;
	else
		priority = ib_prio_to_core_prio(fs_matcher->priority, false);

	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	switch (fs_matcher->ns_type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					      reformat_l3_tunnel_to_l2) &&
		    !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
		    !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		max_table_size = BIT(
			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
					       reformat_l3_tunnel_to_l2) &&
		    esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		priority = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
		priority = fs_matcher->priority;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
		priority = fs_matcher->priority;
		break;
	default:
		break;
	}

	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);

	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	switch (fs_matcher->ns_type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
		prio = &dev->flow_db->prios[priority];
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		prio = &dev->flow_db->egress_prios[priority];
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		prio = &dev->flow_db->fdb;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		prio = &dev->flow_db->rdma_rx[priority];
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		prio = &dev->flow_db->rdma_tx[priority];
		break;
	default: return ERR_PTR(-EINVAL);
	}

	if (!prio)
		return ERR_PTR(-EINVAL);

	if (prio->flow_table)
		return prio;

	return _get_prio(ns, prio, priority, max_table_size,
			 MLX5_FS_MAX_TYPES, flags);
}

static struct mlx5_ib_flow_handler *
_create_raw_flow_rule(struct mlx5_ib_dev *dev,
		      struct mlx5_ib_flow_prio *ft_prio,
		      struct mlx5_flow_destination *dst,
		      struct mlx5_ib_flow_matcher *fs_matcher,
		      struct mlx5_flow_context *flow_context,
		      struct mlx5_flow_act *flow_act,
		      void *cmd_in, int inlen,
		      int dst_num)
{
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	memcpy(spec->match_value, cmd_in, inlen);
	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
	       fs_matcher->mask_len);
	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
	spec->flow_context = *flow_context;

	handler->rule = mlx5_add_flow_rules(ft, spec,
					    flow_act, dst, dst_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;
	ft_prio->flow_table = ft;

free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}
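
/*
 * A raw flow is considered multicast only when outer header matching is
 * enabled and both the matcher mask and the match value describe a multicast
 * destination (Ethernet DMAC or IPv4 destination address).
 */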
static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
				void *match_v)
{
	void *match_c;
	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
	void *dmac, *dmac_mask;
	void *ipv4, *ipv4_mask;

	if (!(fs_matcher->match_criteria_enable &
	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
		return false;

	match_c = fs_matcher->matcher_mask.match_params;
	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
					   outer_headers);
	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);

	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dmac_47_16);
	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dmac_47_16);

	if (is_multicast_ether_addr(dmac) &&
	    is_multicast_ether_addr(dmac_mask))
		return true;

	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
		return true;

	return false;
}

static struct mlx5_ib_flow_handler *raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
	u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
{
	struct mlx5_flow_destination *dst;
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_flow_handler *handler;
	int dst_num = 0;
	bool mcast;
	int err;

	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
		return ERR_PTR(-EOPNOTSUPP);

	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOMEM);

	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
	mutex_lock(&dev->flow_db->lock);

	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}

	switch (dest_type) {
	case MLX5_FLOW_DESTINATION_TYPE_TIR:
		dst[dst_num].type = dest_type;
		dst[dst_num++].tir_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
		dst[dst_num++].ft_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case MLX5_FLOW_DESTINATION_TYPE_PORT:
		dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		break;
	default:
		break;
	}

	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dst[dst_num].counter_id = counter_id;
		dst_num++;
	}

	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
					flow_context, flow_act,
					cmd_in, inlen, dst_num);

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	atomic_inc(&fs_matcher->usecnt);
	handler->flow_matcher = fs_matcher;

	kfree(dst);

	return handler;

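	/*
	 * Rule insertion failed: destroy the flow table if no other rule
	 * references it, then free the destination array.
	 */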
destroy_ft:
	put_flow_table(dev, ft_prio, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);

	return ERR_PTR(err);
}

static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
	u32 flags = 0;

	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;

	return flags;
}

#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
static struct ib_flow_action *
mlx5_ib_create_flow_action_esp(struct ib_device *device,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dev *mdev = to_mdev(device);
	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
	struct mlx5_ib_flow_action *action;
	u64 action_flags;
	u64 flags;
	int err = 0;

	err = uverbs_get_flags64(
		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
	if (err)
		return ERR_PTR(err);

	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);

	/* We currently only support a subset of the standard features. Only a
	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
	 * (with overlap). Full offload mode isn't supported.
	 */
	if (!attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->keymat->protocol !=
	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
		return ERR_PTR(-EOPNOTSUPP);

	aes_gcm = &attr->keymat->keymat.aes_gcm;

	if (aes_gcm->icv_len != 16 ||
	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
		return ERR_PTR(-EOPNOTSUPP);

	action = kmalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return ERR_PTR(-ENOMEM);

	action->esp_aes_gcm.ib_flags = attr->flags;
	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
	       sizeof(accel_attrs.keymat.aes_gcm.salt));
	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;

	action->esp_aes_gcm.ctx =
		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
	if (IS_ERR(action->esp_aes_gcm.ctx)) {
		err = PTR_ERR(action->esp_aes_gcm.ctx);
		goto err_parse;
	}

	action->esp_aes_gcm.ib_flags = attr->flags;

	return &action->ib_action;

err_parse:
	kfree(action);
	return ERR_PTR(err);
}

static int
mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
	int err = 0;

	if (attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
		return -EOPNOTSUPP;

	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
	 * be modified.
	 */
	if (!(maction->esp_aes_gcm.ib_flags &
	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
		return -EINVAL;

	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
	       sizeof(accel_attrs));

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	else
		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
					 &accel_attrs);
	if (err)
		return err;

	maction->esp_aes_gcm.ib_flags &=
		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
	maction->esp_aes_gcm.ib_flags |=
		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;

	return 0;
}

static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
	switch (maction->flow_action_raw.sub_type) {
	case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
		mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
					   maction->flow_action_raw.modify_hdr);
		break;
	case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
		mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
					     maction->flow_action_raw.pkt_reformat);
		break;
	case MLX5_IB_FLOW_ACTION_DECAP:
		break;
	default:
		break;
	}
}

static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);

	switch (action->type) {
	case IB_FLOW_ACTION_ESP:
1727 */ 1728 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 1729 break; 1730 case IB_FLOW_ACTION_UNSPECIFIED: 1731 destroy_flow_action_raw(maction); 1732 break; 1733 default: 1734 WARN_ON(true); 1735 break; 1736 } 1737 1738 kfree(maction); 1739 return 0; 1740 } 1741 1742 static int 1743 mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, 1744 enum mlx5_flow_namespace_type *namespace) 1745 { 1746 switch (table_type) { 1747 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX: 1748 *namespace = MLX5_FLOW_NAMESPACE_BYPASS; 1749 break; 1750 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX: 1751 *namespace = MLX5_FLOW_NAMESPACE_EGRESS; 1752 break; 1753 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: 1754 *namespace = MLX5_FLOW_NAMESPACE_FDB; 1755 break; 1756 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: 1757 *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; 1758 break; 1759 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX: 1760 *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX; 1761 break; 1762 default: 1763 return -EINVAL; 1764 } 1765 1766 return 0; 1767 } 1768 1769 static const struct uverbs_attr_spec mlx5_ib_flow_type[] = { 1770 [MLX5_IB_FLOW_TYPE_NORMAL] = { 1771 .type = UVERBS_ATTR_TYPE_PTR_IN, 1772 .u.ptr = { 1773 .len = sizeof(u16), /* data is priority */ 1774 .min_len = sizeof(u16), 1775 } 1776 }, 1777 [MLX5_IB_FLOW_TYPE_SNIFFER] = { 1778 .type = UVERBS_ATTR_TYPE_PTR_IN, 1779 UVERBS_ATTR_NO_DATA(), 1780 }, 1781 [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = { 1782 .type = UVERBS_ATTR_TYPE_PTR_IN, 1783 UVERBS_ATTR_NO_DATA(), 1784 }, 1785 [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = { 1786 .type = UVERBS_ATTR_TYPE_PTR_IN, 1787 UVERBS_ATTR_NO_DATA(), 1788 }, 1789 }; 1790 1791 static bool is_flow_dest(void *obj, int *dest_id, int *dest_type) 1792 { 1793 struct devx_obj *devx_obj = obj; 1794 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); 1795 1796 switch (opcode) { 1797 case MLX5_CMD_OP_DESTROY_TIR: 1798 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1799 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, 1800 obj_id); 1801 return true; 1802 1803 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 1804 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1805 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox, 1806 table_id); 1807 return true; 1808 default: 1809 return false; 1810 } 1811 } 1812 1813 static int get_dests(struct uverbs_attr_bundle *attrs, 1814 struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id, 1815 int *dest_type, struct ib_qp **qp, u32 *flags) 1816 { 1817 bool dest_devx, dest_qp; 1818 void *devx_obj; 1819 int err; 1820 1821 dest_devx = uverbs_attr_is_valid(attrs, 1822 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); 1823 dest_qp = uverbs_attr_is_valid(attrs, 1824 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP); 1825 1826 *flags = 0; 1827 err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS, 1828 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS | 1829 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP); 1830 if (err) 1831 return err; 1832 1833 /* Both flags are not allowed */ 1834 if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS && 1835 *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP) 1836 return -EINVAL; 1837 1838 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 1839 if (dest_devx && (dest_qp || *flags)) 1840 return -EINVAL; 1841 else if (dest_qp && *flags) 1842 return -EINVAL; 1843 } 1844 1845 /* Allow only DEVX object, drop as dest for FDB */ 1846 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx || 1847 (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP))) 1848 return -EINVAL; 1849 1850 /* Allow 
static int get_dests(struct uverbs_attr_bundle *attrs,
		     struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
		     int *dest_type, struct ib_qp **qp, u32 *flags)
{
	bool dest_devx, dest_qp;
	void *devx_obj;
	int err;

	dest_devx = uverbs_attr_is_valid(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
	dest_qp = uverbs_attr_is_valid(attrs,
				       MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);

	*flags = 0;
	err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
	if (err)
		return err;

	/* The DEFAULT_MISS and DROP flags are mutually exclusive */
	if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
	    *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
		return -EINVAL;

	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
		if (dest_devx && (dest_qp || *flags))
			return -EINVAL;
		else if (dest_qp && *flags)
			return -EINVAL;
	}

	/* For FDB, allow only a DEVX object or the DROP flag as the destination */
	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
	    (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
		return -EINVAL;

	/* Allow only a DEVX object or a QP as the destination when inserting to RDMA_RX */
	if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
	    ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
		return -EINVAL;

	*qp = NULL;
	if (dest_devx) {
		devx_obj =
			uverbs_attr_get_obj(attrs,
					    MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);

		/* Verify that the given DEVX object is a flow
		 * steering destination.
		 */
		if (!is_flow_dest(devx_obj, dest_id, dest_type))
			return -EINVAL;
		/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
		if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
		     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
		    *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			return -EINVAL;
	} else if (dest_qp) {
		struct mlx5_ib_qp *mqp;

		*qp = uverbs_attr_get_obj(attrs,
					  MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
		if (IS_ERR(*qp))
			return PTR_ERR(*qp);

		if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
			return -EINVAL;

		mqp = to_mqp(*qp);
		if (mqp->is_rss)
			*dest_id = mqp->rss_qp.tirn;
		else
			*dest_id = mqp->raw_packet_qp.rq.tirn;
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
		   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	}

	if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
	    (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
	     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
		return -EINVAL;

	return 0;
}

static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {

		if (offset && offset >= devx_obj->flow_counter_bulk_size)
			return false;

		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		*counter_id += offset;
		return true;
	}

	return false;
}

#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_flow_context flow_context = {.flow_tag =
		MLX5_FS_DEFAULT_FLOW_TAG};
	u32 *offset_attr, offset = 0, counter_id = 0;
	int dest_id, dest_type = -1, inlen, len, ret, i;
	struct mlx5_ib_flow_handler *flow_handler;
	struct mlx5_ib_flow_matcher *fs_matcher;
	struct ib_uobject **arr_flow_actions;
	struct ib_uflow_resources *uflow_res;
	struct mlx5_flow_act flow_act = {};
	struct ib_qp *qp = NULL;
	void *devx_obj, *cmd_in;
	struct ib_uobject *uobj;
	struct mlx5_ib_dev *dev;
	u32 flags;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	fs_matcher = uverbs_attr_get_obj(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
	uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
	dev = mlx5_udata_to_mdev(&attrs->driver_udata);

	if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
		return -EINVAL;

	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;

	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;

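	/*
	 * Optional flow counter: if userspace attached a DEVX flow counter
	 * object (possibly with an offset into a bulk allocation), resolve
	 * it to a hardware counter id and request counting on this rule.
	 */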
	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
	if (len) {
		devx_obj = arr_flow_actions[0]->object;

		if (uverbs_attr_is_valid(attrs,
			MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {

			int num_offsets = uverbs_attr_ptr_get_array_size(
				attrs,
				MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
				sizeof(u32));

			if (num_offsets != 1)
				return -EINVAL;

			offset_attr = uverbs_attr_get_alloced_ptr(
				attrs,
				MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
			offset = *offset_attr;
		}

		if (!is_flow_counter(devx_obj, offset, &counter_id))
			return -EINVAL;

		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	}

	cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);

	uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
	if (!uflow_res)
		return -ENOMEM;

	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
	for (i = 0; i < len; i++) {
		struct mlx5_ib_flow_action *maction =
			to_mflow_act(arr_flow_actions[i]->object);

		ret = parse_flow_flow_action(maction, false, &flow_act);
		if (ret)
			goto err_out;
		flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
				   arr_flow_actions[i]->object);
	}

	ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
			       MLX5_IB_ATTR_CREATE_FLOW_TAG);
	if (!ret) {
		if (flow_context.flow_tag >= BIT(24)) {
			ret = -EINVAL;
			goto err_out;
		}
		flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	}

	flow_handler =
		raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
				counter_id, cmd_in, inlen, dest_id, dest_type);
	if (IS_ERR(flow_handler)) {
		ret = PTR_ERR(flow_handler);
		goto err_out;
	}

	ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);

	return 0;
err_out:
	ib_uverbs_flow_resources_free(uflow_res);
	return ret;
}

static int flow_matcher_cleanup(struct ib_uobject *uobject,
				enum rdma_remove_reason why,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_flow_matcher *obj = uobject->object;
	int ret;

	ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
	if (ret)
		return ret;

	kfree(obj);
	return 0;
}

static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
			      struct mlx5_ib_flow_matcher *obj)
{
	enum mlx5_ib_uapi_flow_table_type ft_type =
		MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
	u32 flags;
	int err;

	/* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older
	 * users should switch to it. The legacy
	 * MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS attribute is still accepted
	 * so that existing userspace is not broken.
	 */
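	/* The FT_TYPE attribute and the legacy FLOW_FLAGS attribute cannot
	 * be combined in a single matcher creation request.
	 */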
	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
	    uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
		return -EINVAL;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
		err = uverbs_get_const(&ft_type, attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
		if (err)
			return err;

		err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
		if (err)
			return err;

		return 0;
	}

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
		err = uverbs_get_flags32(&flags, attrs,
					 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
					 IB_FLOW_ATTR_FLAGS_EGRESS);
		if (err)
			return err;

		if (flags) {
			mlx5_ib_ft_type_to_namespace(
				MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
				&obj->ns_type);
			return 0;
		}
	}

	obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	struct mlx5_ib_flow_matcher *obj;
	int err;

	obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->mask_len = uverbs_attr_get_len(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	err = uverbs_copy_from(&obj->matcher_mask,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	if (err)
		goto end;

	obj->flow_type = uverbs_attr_get_enum_id(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);

	if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
		err = uverbs_copy_from(&obj->priority,
				       attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
		if (err)
			goto end;
	}

	err = uverbs_copy_from(&obj->match_criteria_enable,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
	if (err)
		goto end;

	err = mlx5_ib_matcher_ns(attrs, obj);
	if (err)
		goto end;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	atomic_set(&obj->usecnt, 0);
	return 0;

end:
	kfree(obj);
	return err;
}

static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
			     enum mlx5_ib_uapi_flow_table_type ft_type,
			     u8 num_actions, void *in)
{
	enum mlx5_flow_namespace_type namespace;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ERR_PTR(-EINVAL);

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return ERR_PTR(-ENOMEM);

	maction->flow_action_raw.modify_hdr =
		mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);

	if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
		ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
		kfree(maction);
		return ERR_PTR(ret);
	}
	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
	maction->flow_action_raw.dev = dev;

	return &maction->ib_action;
}
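/*
 * Modify-header actions can be created only when at least one of the NIC RX,
 * NIC TX or RDMA TX flow tables reports a non-zero max_modify_header_actions
 * capability.
 */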
static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					 max_modify_header_actions) ||
	       MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
					 max_modify_header_actions) ||
	       MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
					 max_modify_header_actions);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct ib_flow_action *action;
	int num_actions;
	void *in;
	int ret;

	if (!mlx5_ib_modify_header_supported(mdev))
		return -EOPNOTSUPP;

	in = uverbs_attr_get_alloced_ptr(attrs,
		MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);

	num_actions = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
		MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
	if (num_actions < 0)
		return num_actions;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
	if (ret)
		return ret;
	action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
	if (IS_ERR(action))
		return PTR_ERR(action);

	uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);

	return 0;
}

static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
						      u8 packet_reformat_type,
						      u8 ft_type)
{
	switch (packet_reformat_type) {
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
			return MLX5_CAP_FLOWTABLE(ibdev->mdev,
						  encap_general_header);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
			return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
							 reformat_l2_to_l3_tunnel);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
							 reformat_l3_tunnel_to_l2);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
		break;
	default:
		break;
	}

	return false;
}

static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
{
	switch (dv_prt) {
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		*prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_ib_flow_action_create_packet_reformat_ctx(
	struct mlx5_ib_dev *dev,
	struct mlx5_ib_flow_action *maction,
	u8 ft_type, u8 dv_prt,
	void *in, size_t len)
{
	enum mlx5_flow_namespace_type namespace;
	u8 prm_prt;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ret;

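	/* Translate the uverbs reformat type to the PRM reformat type
	 * expected by mlx5_packet_reformat_alloc().
	 */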
	ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
	if (ret)
		return ret;

	maction->flow_action_raw.pkt_reformat =
		mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
					   in, namespace);
	if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
		ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
		return ret;
	}

	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
	maction->flow_action_raw.dev = dev;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
	if (ret)
		return ret;

	ret = uverbs_get_const(&dv_prt, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
	if (ret)
		return ret;

	if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
		return -EOPNOTSUPP;

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return -ENOMEM;

	if (dv_prt ==
	    MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
		maction->flow_action_raw.sub_type =
			MLX5_IB_FLOW_ACTION_DECAP;
		maction->flow_action_raw.dev = mdev;
	} else {
		void *in;
		int len;

		in = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
		if (IS_ERR(in)) {
			ret = PTR_ERR(in);
			goto free_maction;
		}

		len = uverbs_attr_get_len(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);

		ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
			maction, ft_type, dv_prt, in, len);
		if (ret)
			goto free_maction;
	}

	uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);
	return 0;

free_maction:
	kfree(maction);
	return ret;
}
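/*
 * The uverbs method and attribute declarations below describe the ioctl()
 * interface this file exposes to userspace: flow creation and destruction,
 * flow matchers, and the raw flow actions (modify header, packet reformat).
 */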
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_CREATE_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
			     UVERBS_OBJECT_FLOW_ACTION,
			     UVERBS_ACCESS_READ, 1,
			     MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
			     MLX5_IB_OBJECT_DEVX_OBJ,
			     UVERBS_ACCESS_READ, 1, 1,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
			   UA_OPTIONAL,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
			     enum mlx5_ib_create_flow_flags,
			     UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DESTROY_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_fs,
		   UVERBS_OBJECT_FLOW,
		   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
		   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
			   UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
				   set_add_copy_action_in_auto)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
			   UVERBS_ATTR_MIN_SIZE(1),
			   UA_ALLOC_AND_COPY,
			   UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
			     enum mlx5_ib_uapi_flow_action_packet_reformat_type,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

ADD_UVERBS_METHODS(
	mlx5_ib_flow_actions,
	UVERBS_OBJECT_FLOW_ACTION,
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY),
	UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
			    mlx5_ib_flow_type,
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
			     enum ib_flow_flags,
			     UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
			    UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
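/*
 * Chain the flow matcher, flow and flow action object trees into the uapi
 * definition table exported by this file.
 */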
const struct uapi_definition mlx5_ib_flow_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_FLOW_MATCHER),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_FLOW,
		&mlx5_ib_fs),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_actions),
	{},
};

static const struct ib_device_ops flow_ops = {
	.create_flow = mlx5_ib_create_flow,
	.destroy_flow = mlx5_ib_destroy_flow,
	.destroy_flow_action = mlx5_ib_destroy_flow_action,
};

static const struct ib_device_ops flow_ipsec_ops = {
	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};

int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);

	if (!dev->flow_db)
		return -ENOMEM;

	mutex_init(&dev->flow_db->lock);

	ib_set_device_ops(&dev->ib_dev, &flow_ops);
	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
		ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);

	return 0;
}