1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* 3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. 4 */ 5 6 #include <rdma/ib_user_verbs.h> 7 #include <rdma/ib_verbs.h> 8 #include <rdma/uverbs_types.h> 9 #include <rdma/uverbs_ioctl.h> 10 #include <rdma/uverbs_std_types.h> 11 #include <rdma/mlx5_user_ioctl_cmds.h> 12 #include <rdma/mlx5_user_ioctl_verbs.h> 13 #include <rdma/ib_umem.h> 14 #include <linux/mlx5/driver.h> 15 #include <linux/mlx5/fs.h> 16 #include <linux/mlx5/fs_helpers.h> 17 #include <linux/mlx5/accel.h> 18 #include <linux/mlx5/eswitch.h> 19 #include "mlx5_ib.h" 20 #include "counters.h" 21 #include "devx.h" 22 #include "fs.h" 23 24 #define UVERBS_MODULE_NAME mlx5_ib 25 #include <rdma/uverbs_named_ioctl.h> 26 27 enum { 28 MATCH_CRITERIA_ENABLE_OUTER_BIT, 29 MATCH_CRITERIA_ENABLE_MISC_BIT, 30 MATCH_CRITERIA_ENABLE_INNER_BIT, 31 MATCH_CRITERIA_ENABLE_MISC2_BIT 32 }; 33 34 #define HEADER_IS_ZERO(match_criteria, headers) \ 35 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 36 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 37 38 static u8 get_match_criteria_enable(u32 *match_criteria) 39 { 40 u8 match_criteria_enable; 41 42 match_criteria_enable = 43 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 44 MATCH_CRITERIA_ENABLE_OUTER_BIT; 45 match_criteria_enable |= 46 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 47 MATCH_CRITERIA_ENABLE_MISC_BIT; 48 match_criteria_enable |= 49 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 50 MATCH_CRITERIA_ENABLE_INNER_BIT; 51 match_criteria_enable |= 52 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 53 MATCH_CRITERIA_ENABLE_MISC2_BIT; 54 55 return match_criteria_enable; 56 } 57 58 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 59 { 60 u8 entry_mask; 61 u8 entry_val; 62 int err = 0; 63 64 if (!mask) 65 goto out; 66 67 entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, 68 ip_protocol); 69 entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, 70 ip_protocol); 71 if (!entry_mask) { 72 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 73 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 74 goto out; 75 } 76 /* Don't override existing ip protocol */ 77 if (mask != entry_mask || val != entry_val) 78 err = -EINVAL; 79 out: 80 return err; 81 } 82 83 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 84 bool inner) 85 { 86 if (inner) { 87 MLX5_SET(fte_match_set_misc, 88 misc_c, inner_ipv6_flow_label, mask); 89 MLX5_SET(fte_match_set_misc, 90 misc_v, inner_ipv6_flow_label, val); 91 } else { 92 MLX5_SET(fte_match_set_misc, 93 misc_c, outer_ipv6_flow_label, mask); 94 MLX5_SET(fte_match_set_misc, 95 misc_v, outer_ipv6_flow_label, val); 96 } 97 } 98 99 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 100 { 101 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 102 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 103 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 104 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 105 } 106 107 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) 108 { 109 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) && 110 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL)) 111 return -EOPNOTSUPP; 112 113 if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) && 114 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP)) 115 return -EOPNOTSUPP; 116 117 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) && 118 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS)) 119 return -EOPNOTSUPP; 120 121 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) && 122 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL)) 123 return -EOPNOTSUPP; 124 125 return 0; 126 } 127 128 #define LAST_ETH_FIELD vlan_tag 129 #define LAST_IB_FIELD sl 130 #define LAST_IPV4_FIELD tos 131 #define LAST_IPV6_FIELD traffic_class 132 #define LAST_TCP_UDP_FIELD src_port 133 #define LAST_TUNNEL_FIELD tunnel_id 134 #define LAST_FLOW_TAG_FIELD tag_id 135 #define LAST_DROP_FIELD size 136 #define LAST_COUNTERS_FIELD counters 137 138 /* Field is the last supported field */ 139 #define FIELDS_NOT_SUPPORTED(filter, field) \ 140 memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \ 141 sizeof(filter) - offsetofend(typeof(filter), field)) 142 143 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, 144 bool is_egress, 145 struct mlx5_flow_act *action) 146 { 147 148 switch (maction->ib_action.type) { 149 case IB_FLOW_ACTION_ESP: 150 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 151 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) 152 return -EINVAL; 153 /* Currently only AES_GCM keymat is supported by the driver */ 154 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 155 action->action |= is_egress ? 156 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 157 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 158 return 0; 159 case IB_FLOW_ACTION_UNSPECIFIED: 160 if (maction->flow_action_raw.sub_type == 161 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { 162 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 163 return -EINVAL; 164 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 165 action->modify_hdr = 166 maction->flow_action_raw.modify_hdr; 167 return 0; 168 } 169 if (maction->flow_action_raw.sub_type == 170 MLX5_IB_FLOW_ACTION_DECAP) { 171 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 172 return -EINVAL; 173 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 174 return 0; 175 } 176 if (maction->flow_action_raw.sub_type == 177 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) { 178 if (action->action & 179 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) 180 return -EINVAL; 181 action->action |= 182 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 183 action->pkt_reformat = 184 maction->flow_action_raw.pkt_reformat; 185 return 0; 186 } 187 fallthrough; 188 default: 189 return -EOPNOTSUPP; 190 } 191 } 192 193 static int parse_flow_attr(struct mlx5_core_dev *mdev, 194 struct mlx5_flow_spec *spec, 195 const union ib_flow_spec *ib_spec, 196 const struct ib_flow_attr *flow_attr, 197 struct mlx5_flow_act *action, u32 prev_type) 198 { 199 struct mlx5_flow_context *flow_context = &spec->flow_context; 200 u32 *match_c = spec->match_criteria; 201 u32 *match_v = spec->match_value; 202 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 203 misc_parameters); 204 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 205 misc_parameters); 206 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 207 misc_parameters_2); 208 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 209 misc_parameters_2); 210 void *headers_c; 211 void *headers_v; 212 int match_ipv; 213 int ret; 214 215 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 216 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 217 inner_headers); 218 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 219 inner_headers); 220 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 221 ft_field_support.inner_ip_version); 222 } else { 223 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 224 outer_headers); 225 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 226 outer_headers); 227 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 228 ft_field_support.outer_ip_version); 229 } 230 231 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 232 case IB_FLOW_SPEC_ETH: 233 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 234 return -EOPNOTSUPP; 235 236 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 237 dmac_47_16), 238 ib_spec->eth.mask.dst_mac); 239 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 240 dmac_47_16), 241 ib_spec->eth.val.dst_mac); 242 243 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 244 smac_47_16), 245 ib_spec->eth.mask.src_mac); 246 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 247 smac_47_16), 248 ib_spec->eth.val.src_mac); 249 250 if (ib_spec->eth.mask.vlan_tag) { 251 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 252 cvlan_tag, 1); 253 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 254 cvlan_tag, 1); 255 256 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 257 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 258 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 259 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 260 261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 262 first_cfi, 263 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 264 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 265 first_cfi, 266 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 267 268 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 269 first_prio, 270 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 271 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 272 first_prio, 273 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 274 } 275 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 276 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 277 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 278 ethertype, ntohs(ib_spec->eth.val.ether_type)); 279 break; 280 case IB_FLOW_SPEC_IPV4: 281 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 282 return -EOPNOTSUPP; 283 284 if (match_ipv) { 285 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 286 ip_version, 0xf); 287 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 288 ip_version, MLX5_FS_IPV4_VERSION); 289 } else { 290 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 291 ethertype, 0xffff); 292 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 293 ethertype, ETH_P_IP); 294 } 295 296 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 297 src_ipv4_src_ipv6.ipv4_layout.ipv4), 298 &ib_spec->ipv4.mask.src_ip, 299 sizeof(ib_spec->ipv4.mask.src_ip)); 300 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 301 src_ipv4_src_ipv6.ipv4_layout.ipv4), 302 &ib_spec->ipv4.val.src_ip, 303 sizeof(ib_spec->ipv4.val.src_ip)); 304 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 305 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 306 &ib_spec->ipv4.mask.dst_ip, 307 sizeof(ib_spec->ipv4.mask.dst_ip)); 308 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 309 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 310 &ib_spec->ipv4.val.dst_ip, 311 sizeof(ib_spec->ipv4.val.dst_ip)); 312 313 set_tos(headers_c, headers_v, 314 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 315 316 if (set_proto(headers_c, headers_v, 317 ib_spec->ipv4.mask.proto, 318 ib_spec->ipv4.val.proto)) 319 return -EINVAL; 320 break; 321 case IB_FLOW_SPEC_IPV6: 322 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 323 return -EOPNOTSUPP; 324 325 if (match_ipv) { 326 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 327 ip_version, 0xf); 328 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 329 ip_version, MLX5_FS_IPV6_VERSION); 330 } else { 331 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 332 ethertype, 0xffff); 333 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 334 ethertype, ETH_P_IPV6); 335 } 336 337 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 338 src_ipv4_src_ipv6.ipv6_layout.ipv6), 339 &ib_spec->ipv6.mask.src_ip, 340 sizeof(ib_spec->ipv6.mask.src_ip)); 341 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 342 src_ipv4_src_ipv6.ipv6_layout.ipv6), 343 &ib_spec->ipv6.val.src_ip, 344 sizeof(ib_spec->ipv6.val.src_ip)); 345 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 346 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 347 &ib_spec->ipv6.mask.dst_ip, 348 sizeof(ib_spec->ipv6.mask.dst_ip)); 349 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 350 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 351 &ib_spec->ipv6.val.dst_ip, 352 sizeof(ib_spec->ipv6.val.dst_ip)); 353 354 set_tos(headers_c, headers_v, 355 ib_spec->ipv6.mask.traffic_class, 356 ib_spec->ipv6.val.traffic_class); 357 358 if (set_proto(headers_c, headers_v, 359 ib_spec->ipv6.mask.next_hdr, 360 ib_spec->ipv6.val.next_hdr)) 361 return -EINVAL; 362 363 set_flow_label(misc_params_c, misc_params_v, 364 ntohl(ib_spec->ipv6.mask.flow_label), 365 ntohl(ib_spec->ipv6.val.flow_label), 366 ib_spec->type & IB_FLOW_SPEC_INNER); 367 break; 368 case IB_FLOW_SPEC_ESP: 369 if (ib_spec->esp.mask.seq) 370 return -EOPNOTSUPP; 371 372 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 373 ntohl(ib_spec->esp.mask.spi)); 374 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 375 ntohl(ib_spec->esp.val.spi)); 376 break; 377 case IB_FLOW_SPEC_TCP: 378 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 379 LAST_TCP_UDP_FIELD)) 380 return -EOPNOTSUPP; 381 382 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) 383 return -EINVAL; 384 385 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 386 ntohs(ib_spec->tcp_udp.mask.src_port)); 387 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 388 ntohs(ib_spec->tcp_udp.val.src_port)); 389 390 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 391 ntohs(ib_spec->tcp_udp.mask.dst_port)); 392 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 393 ntohs(ib_spec->tcp_udp.val.dst_port)); 394 break; 395 case IB_FLOW_SPEC_UDP: 396 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 397 LAST_TCP_UDP_FIELD)) 398 return -EOPNOTSUPP; 399 400 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) 401 return -EINVAL; 402 403 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 404 ntohs(ib_spec->tcp_udp.mask.src_port)); 405 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 406 ntohs(ib_spec->tcp_udp.val.src_port)); 407 408 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 409 ntohs(ib_spec->tcp_udp.mask.dst_port)); 410 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 411 ntohs(ib_spec->tcp_udp.val.dst_port)); 412 break; 413 case IB_FLOW_SPEC_GRE: 414 if (ib_spec->gre.mask.c_ks_res0_ver) 415 return -EOPNOTSUPP; 416 417 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) 418 return -EINVAL; 419 420 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 421 0xff); 422 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 423 IPPROTO_GRE); 424 425 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 426 ntohs(ib_spec->gre.mask.protocol)); 427 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 428 ntohs(ib_spec->gre.val.protocol)); 429 430 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 431 gre_key.nvgre.hi), 432 &ib_spec->gre.mask.key, 433 sizeof(ib_spec->gre.mask.key)); 434 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 435 gre_key.nvgre.hi), 436 &ib_spec->gre.val.key, 437 sizeof(ib_spec->gre.val.key)); 438 break; 439 case IB_FLOW_SPEC_MPLS: 440 switch (prev_type) { 441 case IB_FLOW_SPEC_UDP: 442 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 443 ft_field_support.outer_first_mpls_over_udp), 444 &ib_spec->mpls.mask.tag)) 445 return -EOPNOTSUPP; 446 447 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 448 outer_first_mpls_over_udp), 449 &ib_spec->mpls.val.tag, 450 sizeof(ib_spec->mpls.val.tag)); 451 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 452 outer_first_mpls_over_udp), 453 &ib_spec->mpls.mask.tag, 454 sizeof(ib_spec->mpls.mask.tag)); 455 break; 456 case IB_FLOW_SPEC_GRE: 457 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 458 ft_field_support.outer_first_mpls_over_gre), 459 &ib_spec->mpls.mask.tag)) 460 return -EOPNOTSUPP; 461 462 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 463 outer_first_mpls_over_gre), 464 &ib_spec->mpls.val.tag, 465 sizeof(ib_spec->mpls.val.tag)); 466 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 467 outer_first_mpls_over_gre), 468 &ib_spec->mpls.mask.tag, 469 sizeof(ib_spec->mpls.mask.tag)); 470 break; 471 default: 472 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 473 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 474 ft_field_support.inner_first_mpls), 475 &ib_spec->mpls.mask.tag)) 476 return -EOPNOTSUPP; 477 478 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 479 inner_first_mpls), 480 &ib_spec->mpls.val.tag, 481 sizeof(ib_spec->mpls.val.tag)); 482 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 483 inner_first_mpls), 484 &ib_spec->mpls.mask.tag, 485 sizeof(ib_spec->mpls.mask.tag)); 486 } else { 487 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 488 ft_field_support.outer_first_mpls), 489 &ib_spec->mpls.mask.tag)) 490 return -EOPNOTSUPP; 491 492 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 493 outer_first_mpls), 494 &ib_spec->mpls.val.tag, 495 sizeof(ib_spec->mpls.val.tag)); 496 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 497 outer_first_mpls), 498 &ib_spec->mpls.mask.tag, 499 sizeof(ib_spec->mpls.mask.tag)); 500 } 501 } 502 break; 503 case IB_FLOW_SPEC_VXLAN_TUNNEL: 504 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 505 LAST_TUNNEL_FIELD)) 506 return -EOPNOTSUPP; 507 508 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 509 ntohl(ib_spec->tunnel.mask.tunnel_id)); 510 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 511 ntohl(ib_spec->tunnel.val.tunnel_id)); 512 break; 513 case IB_FLOW_SPEC_ACTION_TAG: 514 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 515 LAST_FLOW_TAG_FIELD)) 516 return -EOPNOTSUPP; 517 if (ib_spec->flow_tag.tag_id >= BIT(24)) 518 return -EINVAL; 519 520 flow_context->flow_tag = ib_spec->flow_tag.tag_id; 521 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 522 break; 523 case IB_FLOW_SPEC_ACTION_DROP: 524 if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 525 LAST_DROP_FIELD)) 526 return -EOPNOTSUPP; 527 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 528 break; 529 case IB_FLOW_SPEC_ACTION_HANDLE: 530 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act), 531 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action); 532 if (ret) 533 return ret; 534 break; 535 case IB_FLOW_SPEC_ACTION_COUNT: 536 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 537 LAST_COUNTERS_FIELD)) 538 return -EOPNOTSUPP; 539 540 /* for now support only one counters spec per flow */ 541 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 542 return -EINVAL; 543 544 action->counters = ib_spec->flow_count.counters; 545 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 546 break; 547 default: 548 return -EINVAL; 549 } 550 551 return 0; 552 } 553 554 /* If a flow could catch both multicast and unicast packets, 555 * it won't fall into the multicast flow steering table and this rule 556 * could steal other multicast packets. 557 */ 558 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) 559 { 560 union ib_flow_spec *flow_spec; 561 562 if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 563 ib_attr->num_of_specs < 1) 564 return false; 565 566 flow_spec = (union ib_flow_spec *)(ib_attr + 1); 567 if (flow_spec->type == IB_FLOW_SPEC_IPV4) { 568 struct ib_flow_spec_ipv4 *ipv4_spec; 569 570 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec; 571 if (ipv4_is_multicast(ipv4_spec->val.dst_ip)) 572 return true; 573 574 return false; 575 } 576 577 if (flow_spec->type == IB_FLOW_SPEC_ETH) { 578 struct ib_flow_spec_eth *eth_spec; 579 580 eth_spec = (struct ib_flow_spec_eth *)flow_spec; 581 return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 582 is_multicast_ether_addr(eth_spec->val.dst_mac); 583 } 584 585 return false; 586 } 587 588 enum valid_spec { 589 VALID_SPEC_INVALID, 590 VALID_SPEC_VALID, 591 VALID_SPEC_NA, 592 }; 593 594 static enum valid_spec 595 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, 596 const struct mlx5_flow_spec *spec, 597 const struct mlx5_flow_act *flow_act, 598 bool egress) 599 { 600 const u32 *match_c = spec->match_criteria; 601 bool is_crypto = 602 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 603 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); 604 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); 605 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; 606 607 /* 608 * Currently only crypto is supported in egress, when regular egress 609 * rules would be supported, always return VALID_SPEC_NA. 610 */ 611 if (!is_crypto) 612 return VALID_SPEC_NA; 613 614 return is_crypto && is_ipsec && 615 (!egress || (!is_drop && 616 !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? 617 VALID_SPEC_VALID : VALID_SPEC_INVALID; 618 } 619 620 static bool is_valid_spec(struct mlx5_core_dev *mdev, 621 const struct mlx5_flow_spec *spec, 622 const struct mlx5_flow_act *flow_act, 623 bool egress) 624 { 625 /* We curretly only support ipsec egress flow */ 626 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; 627 } 628 629 static bool is_valid_ethertype(struct mlx5_core_dev *mdev, 630 const struct ib_flow_attr *flow_attr, 631 bool check_inner) 632 { 633 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 634 int match_ipv = check_inner ? 635 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 636 ft_field_support.inner_ip_version) : 637 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 638 ft_field_support.outer_ip_version); 639 int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0; 640 bool ipv4_spec_valid, ipv6_spec_valid; 641 unsigned int ip_spec_type = 0; 642 bool has_ethertype = false; 643 unsigned int spec_index; 644 bool mask_valid = true; 645 u16 eth_type = 0; 646 bool type_valid; 647 648 /* Validate that ethertype is correct */ 649 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 650 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 651 ib_spec->eth.mask.ether_type) { 652 mask_valid = (ib_spec->eth.mask.ether_type == 653 htons(0xffff)); 654 has_ethertype = true; 655 eth_type = ntohs(ib_spec->eth.val.ether_type); 656 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 657 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 658 ip_spec_type = ib_spec->type; 659 } 660 ib_spec = (void *)ib_spec + ib_spec->size; 661 } 662 663 type_valid = (!has_ethertype) || (!ip_spec_type); 664 if (!type_valid && mask_valid) { 665 ipv4_spec_valid = (eth_type == ETH_P_IP) && 666 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 667 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 668 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 669 670 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 671 (((eth_type == ETH_P_MPLS_UC) || 672 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 673 } 674 675 return type_valid; 676 } 677 678 static bool is_valid_attr(struct mlx5_core_dev *mdev, 679 const struct ib_flow_attr *flow_attr) 680 { 681 return is_valid_ethertype(mdev, flow_attr, false) && 682 is_valid_ethertype(mdev, flow_attr, true); 683 } 684 685 static void put_flow_table(struct mlx5_ib_dev *dev, 686 struct mlx5_ib_flow_prio *prio, bool ft_added) 687 { 688 prio->refcount -= !!ft_added; 689 if (!prio->refcount) { 690 mlx5_destroy_flow_table(prio->flow_table); 691 prio->flow_table = NULL; 692 } 693 } 694 695 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 696 { 697 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 698 struct mlx5_ib_flow_handler, 699 ibflow); 700 struct mlx5_ib_flow_handler *iter, *tmp; 701 struct mlx5_ib_dev *dev = handler->dev; 702 703 mutex_lock(&dev->flow_db->lock); 704 705 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 706 mlx5_del_flow_rules(iter->rule); 707 put_flow_table(dev, iter->prio, true); 708 list_del(&iter->list); 709 kfree(iter); 710 } 711 712 mlx5_del_flow_rules(handler->rule); 713 put_flow_table(dev, handler->prio, true); 714 mlx5_ib_counters_clear_description(handler->ibcounters); 715 mutex_unlock(&dev->flow_db->lock); 716 if (handler->flow_matcher) 717 atomic_dec(&handler->flow_matcher->usecnt); 718 kfree(handler); 719 720 return 0; 721 } 722 723 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 724 { 725 priority *= 2; 726 if (!dont_trap) 727 priority++; 728 return priority; 729 } 730 731 enum flow_table_type { 732 MLX5_IB_FT_RX, 733 MLX5_IB_FT_TX 734 }; 735 736 #define MLX5_FS_MAX_TYPES 6 737 #define MLX5_FS_MAX_ENTRIES BIT(16) 738 739 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns, 740 struct mlx5_ib_flow_prio *prio, 741 int priority, 742 int num_entries, int num_groups, 743 u32 flags) 744 { 745 struct mlx5_flow_table_attr ft_attr = {}; 746 struct mlx5_flow_table *ft; 747 748 ft_attr.prio = priority; 749 ft_attr.max_fte = num_entries; 750 ft_attr.flags = flags; 751 ft_attr.autogroup.max_num_groups = num_groups; 752 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); 753 if (IS_ERR(ft)) 754 return ERR_CAST(ft); 755 756 prio->flow_table = ft; 757 prio->refcount = 0; 758 return prio; 759 } 760 761 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 762 struct ib_flow_attr *flow_attr, 763 enum flow_table_type ft_type) 764 { 765 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 766 struct mlx5_flow_namespace *ns = NULL; 767 enum mlx5_flow_namespace_type fn_type; 768 struct mlx5_ib_flow_prio *prio; 769 struct mlx5_flow_table *ft; 770 int max_table_size; 771 int num_entries; 772 int num_groups; 773 bool esw_encap; 774 u32 flags = 0; 775 int priority; 776 777 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 778 log_max_ft_size)); 779 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 780 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 781 switch (flow_attr->type) { 782 case IB_FLOW_ATTR_NORMAL: 783 if (flow_is_multicast_only(flow_attr) && !dont_trap) 784 priority = MLX5_IB_FLOW_MCAST_PRIO; 785 else 786 priority = ib_prio_to_core_prio(flow_attr->priority, 787 dont_trap); 788 if (ft_type == MLX5_IB_FT_RX) { 789 fn_type = MLX5_FLOW_NAMESPACE_BYPASS; 790 prio = &dev->flow_db->prios[priority]; 791 if (!dev->is_rep && !esw_encap && 792 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) 793 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 794 if (!dev->is_rep && !esw_encap && 795 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 796 reformat_l3_tunnel_to_l2)) 797 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 798 } else { 799 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX( 800 dev->mdev, log_max_ft_size)); 801 fn_type = MLX5_FLOW_NAMESPACE_EGRESS; 802 prio = &dev->flow_db->egress_prios[priority]; 803 if (!dev->is_rep && !esw_encap && 804 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) 805 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 806 } 807 ns = mlx5_get_flow_namespace(dev->mdev, fn_type); 808 num_entries = MLX5_FS_MAX_ENTRIES; 809 num_groups = MLX5_FS_MAX_TYPES; 810 break; 811 case IB_FLOW_ATTR_ALL_DEFAULT: 812 case IB_FLOW_ATTR_MC_DEFAULT: 813 ns = mlx5_get_flow_namespace(dev->mdev, 814 MLX5_FLOW_NAMESPACE_LEFTOVERS); 815 build_leftovers_ft_param(&priority, &num_entries, &num_groups); 816 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 817 break; 818 case IB_FLOW_ATTR_SNIFFER: 819 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 820 allow_sniffer_and_nic_rx_shared_tir)) 821 return ERR_PTR(-EOPNOTSUPP); 822 823 ns = mlx5_get_flow_namespace( 824 dev->mdev, ft_type == MLX5_IB_FT_RX ? 825 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 826 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 827 828 prio = &dev->flow_db->sniffer[ft_type]; 829 priority = 0; 830 num_entries = 1; 831 num_groups = 1; 832 break; 833 default: 834 break; 835 } 836 837 if (!ns) 838 return ERR_PTR(-EOPNOTSUPP); 839 840 max_table_size = min_t(int, num_entries, max_table_size); 841 842 ft = prio->flow_table; 843 if (!ft) 844 return _get_prio(ns, prio, priority, max_table_size, num_groups, 845 flags); 846 847 return prio; 848 } 849 850 static void set_underlay_qp(struct mlx5_ib_dev *dev, 851 struct mlx5_flow_spec *spec, 852 u32 underlay_qpn) 853 { 854 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 855 spec->match_criteria, 856 misc_parameters); 857 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 858 misc_parameters); 859 860 if (underlay_qpn && 861 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 862 ft_field_support.bth_dst_qp)) { 863 MLX5_SET(fte_match_set_misc, 864 misc_params_v, bth_dst_qp, underlay_qpn); 865 MLX5_SET(fte_match_set_misc, 866 misc_params_c, bth_dst_qp, 0xffffff); 867 } 868 } 869 870 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, 871 struct mlx5_flow_spec *spec, 872 struct mlx5_eswitch_rep *rep) 873 { 874 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 875 void *misc; 876 877 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 878 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 879 misc_parameters_2); 880 881 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 882 mlx5_eswitch_get_vport_metadata_for_match(esw, 883 rep->vport)); 884 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 885 misc_parameters_2); 886 887 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 888 mlx5_eswitch_get_vport_metadata_mask()); 889 } else { 890 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 891 misc_parameters); 892 893 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); 894 895 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 896 misc_parameters); 897 898 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 899 } 900 } 901 902 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 903 struct mlx5_ib_flow_prio *ft_prio, 904 const struct ib_flow_attr *flow_attr, 905 struct mlx5_flow_destination *dst, 906 u32 underlay_qpn, 907 struct mlx5_ib_create_flow *ucmd) 908 { 909 struct mlx5_flow_table *ft = ft_prio->flow_table; 910 struct mlx5_ib_flow_handler *handler; 911 struct mlx5_flow_act flow_act = {}; 912 struct mlx5_flow_spec *spec; 913 struct mlx5_flow_destination dest_arr[2] = {}; 914 struct mlx5_flow_destination *rule_dst = dest_arr; 915 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 916 unsigned int spec_index; 917 u32 prev_type = 0; 918 int err = 0; 919 int dest_num = 0; 920 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 921 922 if (!is_valid_attr(dev->mdev, flow_attr)) 923 return ERR_PTR(-EINVAL); 924 925 if (dev->is_rep && is_egress) 926 return ERR_PTR(-EINVAL); 927 928 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 929 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 930 if (!handler || !spec) { 931 err = -ENOMEM; 932 goto free; 933 } 934 935 INIT_LIST_HEAD(&handler->list); 936 937 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 938 err = parse_flow_attr(dev->mdev, spec, 939 ib_flow, flow_attr, &flow_act, 940 prev_type); 941 if (err < 0) 942 goto free; 943 944 prev_type = ((union ib_flow_spec *)ib_flow)->type; 945 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 946 } 947 948 if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { 949 memcpy(&dest_arr[0], dst, sizeof(*dst)); 950 dest_num++; 951 } 952 953 if (!flow_is_multicast_only(flow_attr)) 954 set_underlay_qp(dev, spec, underlay_qpn); 955 956 if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) { 957 struct mlx5_eswitch_rep *rep; 958 959 rep = dev->port[flow_attr->port - 1].rep; 960 if (!rep) { 961 err = -EINVAL; 962 goto free; 963 } 964 965 mlx5_ib_set_rule_source_port(dev, spec, rep); 966 } 967 968 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 969 970 if (is_egress && 971 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 972 err = -EINVAL; 973 goto free; 974 } 975 976 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 977 struct mlx5_ib_mcounters *mcounters; 978 979 err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd); 980 if (err) 981 goto free; 982 983 mcounters = to_mcounters(flow_act.counters); 984 handler->ibcounters = flow_act.counters; 985 dest_arr[dest_num].type = 986 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 987 dest_arr[dest_num].counter_id = 988 mlx5_fc_id(mcounters->hw_cntrs_hndl); 989 dest_num++; 990 } 991 992 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 993 if (!dest_num) 994 rule_dst = NULL; 995 } else { 996 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) 997 flow_act.action |= 998 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 999 if (is_egress) 1000 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1001 else if (dest_num) 1002 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1003 } 1004 1005 if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && 1006 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1007 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 1008 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 1009 spec->flow_context.flow_tag, flow_attr->type); 1010 err = -EINVAL; 1011 goto free; 1012 } 1013 handler->rule = mlx5_add_flow_rules(ft, spec, 1014 &flow_act, 1015 rule_dst, dest_num); 1016 1017 if (IS_ERR(handler->rule)) { 1018 err = PTR_ERR(handler->rule); 1019 goto free; 1020 } 1021 1022 ft_prio->refcount++; 1023 handler->prio = ft_prio; 1024 handler->dev = dev; 1025 1026 ft_prio->flow_table = ft; 1027 free: 1028 if (err && handler) { 1029 mlx5_ib_counters_clear_description(handler->ibcounters); 1030 kfree(handler); 1031 } 1032 kvfree(spec); 1033 return err ? ERR_PTR(err) : handler; 1034 } 1035 1036 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 1037 struct mlx5_ib_flow_prio *ft_prio, 1038 const struct ib_flow_attr *flow_attr, 1039 struct mlx5_flow_destination *dst) 1040 { 1041 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 1042 } 1043 1044 enum { 1045 LEFTOVERS_MC, 1046 LEFTOVERS_UC, 1047 }; 1048 1049 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1050 struct mlx5_ib_flow_prio *ft_prio, 1051 struct ib_flow_attr *flow_attr, 1052 struct mlx5_flow_destination *dst) 1053 { 1054 struct mlx5_ib_flow_handler *handler_ucast = NULL; 1055 struct mlx5_ib_flow_handler *handler = NULL; 1056 1057 static struct { 1058 struct ib_flow_attr flow_attr; 1059 struct ib_flow_spec_eth eth_flow; 1060 } leftovers_specs[] = { 1061 [LEFTOVERS_MC] = { 1062 .flow_attr = { 1063 .num_of_specs = 1, 1064 .size = sizeof(leftovers_specs[0]) 1065 }, 1066 .eth_flow = { 1067 .type = IB_FLOW_SPEC_ETH, 1068 .size = sizeof(struct ib_flow_spec_eth), 1069 .mask = {.dst_mac = {0x1} }, 1070 .val = {.dst_mac = {0x1} } 1071 } 1072 }, 1073 [LEFTOVERS_UC] = { 1074 .flow_attr = { 1075 .num_of_specs = 1, 1076 .size = sizeof(leftovers_specs[0]) 1077 }, 1078 .eth_flow = { 1079 .type = IB_FLOW_SPEC_ETH, 1080 .size = sizeof(struct ib_flow_spec_eth), 1081 .mask = {.dst_mac = {0x1} }, 1082 .val = {.dst_mac = {} } 1083 } 1084 } 1085 }; 1086 1087 handler = create_flow_rule(dev, ft_prio, 1088 &leftovers_specs[LEFTOVERS_MC].flow_attr, 1089 dst); 1090 if (!IS_ERR(handler) && 1091 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 1092 handler_ucast = create_flow_rule(dev, ft_prio, 1093 &leftovers_specs[LEFTOVERS_UC].flow_attr, 1094 dst); 1095 if (IS_ERR(handler_ucast)) { 1096 mlx5_del_flow_rules(handler->rule); 1097 ft_prio->refcount--; 1098 kfree(handler); 1099 handler = handler_ucast; 1100 } else { 1101 list_add(&handler_ucast->list, &handler->list); 1102 } 1103 } 1104 1105 return handler; 1106 } 1107 1108 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 1109 struct mlx5_ib_flow_prio *ft_rx, 1110 struct mlx5_ib_flow_prio *ft_tx, 1111 struct mlx5_flow_destination *dst) 1112 { 1113 struct mlx5_ib_flow_handler *handler_rx; 1114 struct mlx5_ib_flow_handler *handler_tx; 1115 int err; 1116 static const struct ib_flow_attr flow_attr = { 1117 .num_of_specs = 0, 1118 .type = IB_FLOW_ATTR_SNIFFER, 1119 .size = sizeof(flow_attr) 1120 }; 1121 1122 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 1123 if (IS_ERR(handler_rx)) { 1124 err = PTR_ERR(handler_rx); 1125 goto err; 1126 } 1127 1128 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 1129 if (IS_ERR(handler_tx)) { 1130 err = PTR_ERR(handler_tx); 1131 goto err_tx; 1132 } 1133 1134 list_add(&handler_tx->list, &handler_rx->list); 1135 1136 return handler_rx; 1137 1138 err_tx: 1139 mlx5_del_flow_rules(handler_rx->rule); 1140 ft_rx->refcount--; 1141 kfree(handler_rx); 1142 err: 1143 return ERR_PTR(err); 1144 } 1145 1146 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 1147 struct ib_flow_attr *flow_attr, 1148 struct ib_udata *udata) 1149 { 1150 struct mlx5_ib_dev *dev = to_mdev(qp->device); 1151 struct mlx5_ib_qp *mqp = to_mqp(qp); 1152 struct mlx5_ib_flow_handler *handler = NULL; 1153 struct mlx5_flow_destination *dst = NULL; 1154 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 1155 struct mlx5_ib_flow_prio *ft_prio; 1156 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 1157 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 1158 size_t min_ucmd_sz, required_ucmd_sz; 1159 int err; 1160 int underlay_qpn; 1161 1162 if (udata && udata->inlen) { 1163 min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved); 1164 if (udata->inlen < min_ucmd_sz) 1165 return ERR_PTR(-EOPNOTSUPP); 1166 1167 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 1168 if (err) 1169 return ERR_PTR(err); 1170 1171 /* currently supports only one counters data */ 1172 if (ucmd_hdr.ncounters_data > 1) 1173 return ERR_PTR(-EINVAL); 1174 1175 required_ucmd_sz = min_ucmd_sz + 1176 sizeof(struct mlx5_ib_flow_counters_data) * 1177 ucmd_hdr.ncounters_data; 1178 if (udata->inlen > required_ucmd_sz && 1179 !ib_is_udata_cleared(udata, required_ucmd_sz, 1180 udata->inlen - required_ucmd_sz)) 1181 return ERR_PTR(-EOPNOTSUPP); 1182 1183 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 1184 if (!ucmd) 1185 return ERR_PTR(-ENOMEM); 1186 1187 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 1188 if (err) 1189 goto free_ucmd; 1190 } 1191 1192 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 1193 err = -ENOMEM; 1194 goto free_ucmd; 1195 } 1196 1197 if (flow_attr->port > dev->num_ports || 1198 (flow_attr->flags & 1199 ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) { 1200 err = -EINVAL; 1201 goto free_ucmd; 1202 } 1203 1204 if (is_egress && 1205 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1206 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 1207 err = -EINVAL; 1208 goto free_ucmd; 1209 } 1210 1211 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 1212 if (!dst) { 1213 err = -ENOMEM; 1214 goto free_ucmd; 1215 } 1216 1217 mutex_lock(&dev->flow_db->lock); 1218 1219 ft_prio = get_flow_table(dev, flow_attr, 1220 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 1221 if (IS_ERR(ft_prio)) { 1222 err = PTR_ERR(ft_prio); 1223 goto unlock; 1224 } 1225 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 1226 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 1227 if (IS_ERR(ft_prio_tx)) { 1228 err = PTR_ERR(ft_prio_tx); 1229 ft_prio_tx = NULL; 1230 goto destroy_ft; 1231 } 1232 } 1233 1234 if (is_egress) { 1235 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1236 } else { 1237 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1238 if (mqp->is_rss) 1239 dst->tir_num = mqp->rss_qp.tirn; 1240 else 1241 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 1242 } 1243 1244 switch (flow_attr->type) { 1245 case IB_FLOW_ATTR_NORMAL: 1246 underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ? 1247 mqp->underlay_qpn : 1248 0; 1249 handler = _create_flow_rule(dev, ft_prio, flow_attr, dst, 1250 underlay_qpn, ucmd); 1251 break; 1252 case IB_FLOW_ATTR_ALL_DEFAULT: 1253 case IB_FLOW_ATTR_MC_DEFAULT: 1254 handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst); 1255 break; 1256 case IB_FLOW_ATTR_SNIFFER: 1257 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 1258 break; 1259 default: 1260 err = -EINVAL; 1261 goto destroy_ft; 1262 } 1263 1264 if (IS_ERR(handler)) { 1265 err = PTR_ERR(handler); 1266 handler = NULL; 1267 goto destroy_ft; 1268 } 1269 1270 mutex_unlock(&dev->flow_db->lock); 1271 kfree(dst); 1272 kfree(ucmd); 1273 1274 return &handler->ibflow; 1275 1276 destroy_ft: 1277 put_flow_table(dev, ft_prio, false); 1278 if (ft_prio_tx) 1279 put_flow_table(dev, ft_prio_tx, false); 1280 unlock: 1281 mutex_unlock(&dev->flow_db->lock); 1282 kfree(dst); 1283 free_ucmd: 1284 kfree(ucmd); 1285 return ERR_PTR(err); 1286 } 1287 1288 static struct mlx5_ib_flow_prio * 1289 _get_flow_table(struct mlx5_ib_dev *dev, 1290 struct mlx5_ib_flow_matcher *fs_matcher, 1291 bool mcast) 1292 { 1293 struct mlx5_flow_namespace *ns = NULL; 1294 struct mlx5_ib_flow_prio *prio = NULL; 1295 int max_table_size = 0; 1296 bool esw_encap; 1297 u32 flags = 0; 1298 int priority; 1299 1300 if (mcast) 1301 priority = MLX5_IB_FLOW_MCAST_PRIO; 1302 else 1303 priority = ib_prio_to_core_prio(fs_matcher->priority, false); 1304 1305 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 1306 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1307 switch (fs_matcher->ns_type) { 1308 case MLX5_FLOW_NAMESPACE_BYPASS: 1309 max_table_size = BIT( 1310 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size)); 1311 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) 1312 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 1313 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 1314 reformat_l3_tunnel_to_l2) && 1315 !esw_encap) 1316 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1317 break; 1318 case MLX5_FLOW_NAMESPACE_EGRESS: 1319 max_table_size = BIT( 1320 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); 1321 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && 1322 !esw_encap) 1323 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1324 break; 1325 case MLX5_FLOW_NAMESPACE_FDB: 1326 max_table_size = BIT( 1327 MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); 1328 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) 1329 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 1330 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, 1331 reformat_l3_tunnel_to_l2) && 1332 esw_encap) 1333 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 1334 priority = FDB_BYPASS_PATH; 1335 break; 1336 case MLX5_FLOW_NAMESPACE_RDMA_RX: 1337 max_table_size = BIT( 1338 MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size)); 1339 priority = fs_matcher->priority; 1340 break; 1341 case MLX5_FLOW_NAMESPACE_RDMA_TX: 1342 max_table_size = BIT( 1343 MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size)); 1344 priority = fs_matcher->priority; 1345 break; 1346 default: 1347 break; 1348 } 1349 1350 max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); 1351 1352 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); 1353 if (!ns) 1354 return ERR_PTR(-EOPNOTSUPP); 1355 1356 switch (fs_matcher->ns_type) { 1357 case MLX5_FLOW_NAMESPACE_BYPASS: 1358 prio = &dev->flow_db->prios[priority]; 1359 break; 1360 case MLX5_FLOW_NAMESPACE_EGRESS: 1361 prio = &dev->flow_db->egress_prios[priority]; 1362 break; 1363 case MLX5_FLOW_NAMESPACE_FDB: 1364 prio = &dev->flow_db->fdb; 1365 break; 1366 case MLX5_FLOW_NAMESPACE_RDMA_RX: 1367 prio = &dev->flow_db->rdma_rx[priority]; 1368 break; 1369 case MLX5_FLOW_NAMESPACE_RDMA_TX: 1370 prio = &dev->flow_db->rdma_tx[priority]; 1371 break; 1372 default: return ERR_PTR(-EINVAL); 1373 } 1374 1375 if (!prio) 1376 return ERR_PTR(-EINVAL); 1377 1378 if (prio->flow_table) 1379 return prio; 1380 1381 return _get_prio(ns, prio, priority, max_table_size, 1382 MLX5_FS_MAX_TYPES, flags); 1383 } 1384 1385 static struct mlx5_ib_flow_handler * 1386 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 1387 struct mlx5_ib_flow_prio *ft_prio, 1388 struct mlx5_flow_destination *dst, 1389 struct mlx5_ib_flow_matcher *fs_matcher, 1390 struct mlx5_flow_context *flow_context, 1391 struct mlx5_flow_act *flow_act, 1392 void *cmd_in, int inlen, 1393 int dst_num) 1394 { 1395 struct mlx5_ib_flow_handler *handler; 1396 struct mlx5_flow_spec *spec; 1397 struct mlx5_flow_table *ft = ft_prio->flow_table; 1398 int err = 0; 1399 1400 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1401 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 1402 if (!handler || !spec) { 1403 err = -ENOMEM; 1404 goto free; 1405 } 1406 1407 INIT_LIST_HEAD(&handler->list); 1408 1409 memcpy(spec->match_value, cmd_in, inlen); 1410 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 1411 fs_matcher->mask_len); 1412 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 1413 spec->flow_context = *flow_context; 1414 1415 handler->rule = mlx5_add_flow_rules(ft, spec, 1416 flow_act, dst, dst_num); 1417 1418 if (IS_ERR(handler->rule)) { 1419 err = PTR_ERR(handler->rule); 1420 goto free; 1421 } 1422 1423 ft_prio->refcount++; 1424 handler->prio = ft_prio; 1425 handler->dev = dev; 1426 ft_prio->flow_table = ft; 1427 1428 free: 1429 if (err) 1430 kfree(handler); 1431 kvfree(spec); 1432 return err ? ERR_PTR(err) : handler; 1433 } 1434 1435 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 1436 void *match_v) 1437 { 1438 void *match_c; 1439 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 1440 void *dmac, *dmac_mask; 1441 void *ipv4, *ipv4_mask; 1442 1443 if (!(fs_matcher->match_criteria_enable & 1444 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 1445 return false; 1446 1447 match_c = fs_matcher->matcher_mask.match_params; 1448 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 1449 outer_headers); 1450 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 1451 outer_headers); 1452 1453 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 1454 dmac_47_16); 1455 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 1456 dmac_47_16); 1457 1458 if (is_multicast_ether_addr(dmac) && 1459 is_multicast_ether_addr(dmac_mask)) 1460 return true; 1461 1462 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 1463 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 1464 1465 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 1466 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 1467 1468 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 1469 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 1470 return true; 1471 1472 return false; 1473 } 1474 1475 static struct mlx5_ib_flow_handler *raw_fs_rule_add( 1476 struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, 1477 struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, 1478 u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type) 1479 { 1480 struct mlx5_flow_destination *dst; 1481 struct mlx5_ib_flow_prio *ft_prio; 1482 struct mlx5_ib_flow_handler *handler; 1483 int dst_num = 0; 1484 bool mcast; 1485 int err; 1486 1487 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 1488 return ERR_PTR(-EOPNOTSUPP); 1489 1490 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 1491 return ERR_PTR(-ENOMEM); 1492 1493 dst = kcalloc(2, sizeof(*dst), GFP_KERNEL); 1494 if (!dst) 1495 return ERR_PTR(-ENOMEM); 1496 1497 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 1498 mutex_lock(&dev->flow_db->lock); 1499 1500 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 1501 if (IS_ERR(ft_prio)) { 1502 err = PTR_ERR(ft_prio); 1503 goto unlock; 1504 } 1505 1506 switch (dest_type) { 1507 case MLX5_FLOW_DESTINATION_TYPE_TIR: 1508 dst[dst_num].type = dest_type; 1509 dst[dst_num++].tir_num = dest_id; 1510 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1511 break; 1512 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: 1513 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 1514 dst[dst_num++].ft_num = dest_id; 1515 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1516 break; 1517 case MLX5_FLOW_DESTINATION_TYPE_PORT: 1518 dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1519 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1520 break; 1521 default: 1522 break; 1523 } 1524 1525 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 1526 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 1527 dst[dst_num].counter_id = counter_id; 1528 dst_num++; 1529 } 1530 1531 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, 1532 flow_context, flow_act, 1533 cmd_in, inlen, dst_num); 1534 1535 if (IS_ERR(handler)) { 1536 err = PTR_ERR(handler); 1537 goto destroy_ft; 1538 } 1539 1540 mutex_unlock(&dev->flow_db->lock); 1541 atomic_inc(&fs_matcher->usecnt); 1542 handler->flow_matcher = fs_matcher; 1543 1544 kfree(dst); 1545 1546 return handler; 1547 1548 destroy_ft: 1549 put_flow_table(dev, ft_prio, false); 1550 unlock: 1551 mutex_unlock(&dev->flow_db->lock); 1552 kfree(dst); 1553 1554 return ERR_PTR(err); 1555 } 1556 1557 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 1558 { 1559 u32 flags = 0; 1560 1561 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 1562 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 1563 1564 return flags; 1565 } 1566 1567 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \ 1568 MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 1569 static struct ib_flow_action * 1570 mlx5_ib_create_flow_action_esp(struct ib_device *device, 1571 const struct ib_flow_action_attrs_esp *attr, 1572 struct uverbs_attr_bundle *attrs) 1573 { 1574 struct mlx5_ib_dev *mdev = to_mdev(device); 1575 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 1576 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 1577 struct mlx5_ib_flow_action *action; 1578 u64 action_flags; 1579 u64 flags; 1580 int err = 0; 1581 1582 err = uverbs_get_flags64( 1583 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 1584 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 1585 if (err) 1586 return ERR_PTR(err); 1587 1588 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 1589 1590 /* We current only support a subset of the standard features. Only a 1591 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 1592 * (with overlap). Full offload mode isn't supported. 1593 */ 1594 if (!attr->keymat || attr->replay || attr->encap || 1595 attr->spi || attr->seq || attr->tfc_pad || 1596 attr->hard_limit_pkts || 1597 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1598 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 1599 return ERR_PTR(-EOPNOTSUPP); 1600 1601 if (attr->keymat->protocol != 1602 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 1603 return ERR_PTR(-EOPNOTSUPP); 1604 1605 aes_gcm = &attr->keymat->keymat.aes_gcm; 1606 1607 if (aes_gcm->icv_len != 16 || 1608 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 1609 return ERR_PTR(-EOPNOTSUPP); 1610 1611 action = kmalloc(sizeof(*action), GFP_KERNEL); 1612 if (!action) 1613 return ERR_PTR(-ENOMEM); 1614 1615 action->esp_aes_gcm.ib_flags = attr->flags; 1616 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 1617 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 1618 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 1619 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 1620 sizeof(accel_attrs.keymat.aes_gcm.salt)); 1621 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 1622 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 1623 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 1624 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 1625 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 1626 1627 accel_attrs.esn = attr->esn; 1628 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 1629 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 1630 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1631 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1632 1633 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 1634 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 1635 1636 action->esp_aes_gcm.ctx = 1637 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 1638 if (IS_ERR(action->esp_aes_gcm.ctx)) { 1639 err = PTR_ERR(action->esp_aes_gcm.ctx); 1640 goto err_parse; 1641 } 1642 1643 action->esp_aes_gcm.ib_flags = attr->flags; 1644 1645 return &action->ib_action; 1646 1647 err_parse: 1648 kfree(action); 1649 return ERR_PTR(err); 1650 } 1651 1652 static int 1653 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 1654 const struct ib_flow_action_attrs_esp *attr, 1655 struct uverbs_attr_bundle *attrs) 1656 { 1657 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1658 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 1659 int err = 0; 1660 1661 if (attr->keymat || attr->replay || attr->encap || 1662 attr->spi || attr->seq || attr->tfc_pad || 1663 attr->hard_limit_pkts || 1664 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1665 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 1666 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 1667 return -EOPNOTSUPP; 1668 1669 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 1670 * be modified. 1671 */ 1672 if (!(maction->esp_aes_gcm.ib_flags & 1673 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 1674 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1675 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 1676 return -EINVAL; 1677 1678 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 1679 sizeof(accel_attrs)); 1680 1681 accel_attrs.esn = attr->esn; 1682 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1683 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1684 else 1685 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1686 1687 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 1688 &accel_attrs); 1689 if (err) 1690 return err; 1691 1692 maction->esp_aes_gcm.ib_flags &= 1693 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1694 maction->esp_aes_gcm.ib_flags |= 1695 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1696 1697 return 0; 1698 } 1699 1700 static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) 1701 { 1702 switch (maction->flow_action_raw.sub_type) { 1703 case MLX5_IB_FLOW_ACTION_MODIFY_HEADER: 1704 mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev, 1705 maction->flow_action_raw.modify_hdr); 1706 break; 1707 case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT: 1708 mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev, 1709 maction->flow_action_raw.pkt_reformat); 1710 break; 1711 case MLX5_IB_FLOW_ACTION_DECAP: 1712 break; 1713 default: 1714 break; 1715 } 1716 } 1717 1718 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) 1719 { 1720 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1721 1722 switch (action->type) { 1723 case IB_FLOW_ACTION_ESP: 1724 /* 1725 * We only support aes_gcm by now, so we implicitly know this is 1726 * the underline crypto. 1727 */ 1728 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 1729 break; 1730 case IB_FLOW_ACTION_UNSPECIFIED: 1731 destroy_flow_action_raw(maction); 1732 break; 1733 default: 1734 WARN_ON(true); 1735 break; 1736 } 1737 1738 kfree(maction); 1739 return 0; 1740 } 1741 1742 static int 1743 mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, 1744 enum mlx5_flow_namespace_type *namespace) 1745 { 1746 switch (table_type) { 1747 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX: 1748 *namespace = MLX5_FLOW_NAMESPACE_BYPASS; 1749 break; 1750 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX: 1751 *namespace = MLX5_FLOW_NAMESPACE_EGRESS; 1752 break; 1753 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: 1754 *namespace = MLX5_FLOW_NAMESPACE_FDB; 1755 break; 1756 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: 1757 *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; 1758 break; 1759 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX: 1760 *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX; 1761 break; 1762 default: 1763 return -EINVAL; 1764 } 1765 1766 return 0; 1767 } 1768 1769 static const struct uverbs_attr_spec mlx5_ib_flow_type[] = { 1770 [MLX5_IB_FLOW_TYPE_NORMAL] = { 1771 .type = UVERBS_ATTR_TYPE_PTR_IN, 1772 .u.ptr = { 1773 .len = sizeof(u16), /* data is priority */ 1774 .min_len = sizeof(u16), 1775 } 1776 }, 1777 [MLX5_IB_FLOW_TYPE_SNIFFER] = { 1778 .type = UVERBS_ATTR_TYPE_PTR_IN, 1779 UVERBS_ATTR_NO_DATA(), 1780 }, 1781 [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = { 1782 .type = UVERBS_ATTR_TYPE_PTR_IN, 1783 UVERBS_ATTR_NO_DATA(), 1784 }, 1785 [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = { 1786 .type = UVERBS_ATTR_TYPE_PTR_IN, 1787 UVERBS_ATTR_NO_DATA(), 1788 }, 1789 }; 1790 1791 static bool is_flow_dest(void *obj, int *dest_id, int *dest_type) 1792 { 1793 struct devx_obj *devx_obj = obj; 1794 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); 1795 1796 switch (opcode) { 1797 case MLX5_CMD_OP_DESTROY_TIR: 1798 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1799 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, 1800 obj_id); 1801 return true; 1802 1803 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 1804 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1805 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox, 1806 table_id); 1807 return true; 1808 default: 1809 return false; 1810 } 1811 } 1812 1813 static int get_dests(struct uverbs_attr_bundle *attrs, 1814 struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id, 1815 int *dest_type, struct ib_qp **qp, u32 *flags) 1816 { 1817 bool dest_devx, dest_qp; 1818 void *devx_obj; 1819 int err; 1820 1821 dest_devx = uverbs_attr_is_valid(attrs, 1822 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); 1823 dest_qp = uverbs_attr_is_valid(attrs, 1824 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP); 1825 1826 *flags = 0; 1827 err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS, 1828 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS | 1829 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP); 1830 if (err) 1831 return err; 1832 1833 /* Both flags are not allowed */ 1834 if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS && 1835 *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP) 1836 return -EINVAL; 1837 1838 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 1839 if (dest_devx && (dest_qp || *flags)) 1840 return -EINVAL; 1841 else if (dest_qp && *flags) 1842 return -EINVAL; 1843 } 1844 1845 /* Allow only DEVX object, drop as dest for FDB */ 1846 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx || 1847 (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP))) 1848 return -EINVAL; 1849 1850 /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ 1851 if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && 1852 ((!dest_devx && !dest_qp) || (dest_devx && dest_qp))) 1853 return -EINVAL; 1854 1855 *qp = NULL; 1856 if (dest_devx) { 1857 devx_obj = 1858 uverbs_attr_get_obj(attrs, 1859 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); 1860 1861 /* Verify that the given DEVX object is a flow 1862 * steering destination. 1863 */ 1864 if (!is_flow_dest(devx_obj, dest_id, dest_type)) 1865 return -EINVAL; 1866 /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ 1867 if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || 1868 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && 1869 *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) 1870 return -EINVAL; 1871 } else if (dest_qp) { 1872 struct mlx5_ib_qp *mqp; 1873 1874 *qp = uverbs_attr_get_obj(attrs, 1875 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP); 1876 if (IS_ERR(*qp)) 1877 return PTR_ERR(*qp); 1878 1879 if ((*qp)->qp_type != IB_QPT_RAW_PACKET) 1880 return -EINVAL; 1881 1882 mqp = to_mqp(*qp); 1883 if (mqp->is_rss) 1884 *dest_id = mqp->rss_qp.tirn; 1885 else 1886 *dest_id = mqp->raw_packet_qp.rq.tirn; 1887 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1888 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS || 1889 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) { 1890 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT; 1891 } 1892 1893 if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR && 1894 (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS || 1895 fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)) 1896 return -EINVAL; 1897 1898 return 0; 1899 } 1900 1901 static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id) 1902 { 1903 struct devx_obj *devx_obj = obj; 1904 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); 1905 1906 if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) { 1907 1908 if (offset && offset >= devx_obj->flow_counter_bulk_size) 1909 return false; 1910 1911 *counter_id = MLX5_GET(dealloc_flow_counter_in, 1912 devx_obj->dinbox, 1913 flow_counter_id); 1914 *counter_id += offset; 1915 return true; 1916 } 1917 1918 return false; 1919 } 1920 1921 #define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2 1922 static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( 1923 struct uverbs_attr_bundle *attrs) 1924 { 1925 struct mlx5_flow_context flow_context = {.flow_tag = 1926 MLX5_FS_DEFAULT_FLOW_TAG}; 1927 u32 *offset_attr, offset = 0, counter_id = 0; 1928 int dest_id, dest_type = -1, inlen, len, ret, i; 1929 struct mlx5_ib_flow_handler *flow_handler; 1930 struct mlx5_ib_flow_matcher *fs_matcher; 1931 struct ib_uobject **arr_flow_actions; 1932 struct ib_uflow_resources *uflow_res; 1933 struct mlx5_flow_act flow_act = {}; 1934 struct ib_qp *qp = NULL; 1935 void *devx_obj, *cmd_in; 1936 struct ib_uobject *uobj; 1937 struct mlx5_ib_dev *dev; 1938 u32 flags; 1939 1940 if (!capable(CAP_NET_RAW)) 1941 return -EPERM; 1942 1943 fs_matcher = uverbs_attr_get_obj(attrs, 1944 MLX5_IB_ATTR_CREATE_FLOW_MATCHER); 1945 uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE); 1946 dev = mlx5_udata_to_mdev(&attrs->driver_udata); 1947 1948 if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags)) 1949 return -EINVAL; 1950 1951 if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS) 1952 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; 1953 1954 if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP) 1955 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 1956 1957 len = uverbs_attr_get_uobjs_arr(attrs, 1958 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions); 1959 if (len) { 1960 devx_obj = arr_flow_actions[0]->object; 1961 1962 if (uverbs_attr_is_valid(attrs, 1963 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) { 1964 1965 int num_offsets = uverbs_attr_ptr_get_array_size( 1966 attrs, 1967 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET, 1968 sizeof(u32)); 1969 1970 if (num_offsets != 1) 1971 return -EINVAL; 1972 1973 offset_attr = uverbs_attr_get_alloced_ptr( 1974 attrs, 1975 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET); 1976 offset = *offset_attr; 1977 } 1978 1979 if (!is_flow_counter(devx_obj, offset, &counter_id)) 1980 return -EINVAL; 1981 1982 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 1983 } 1984 1985 cmd_in = uverbs_attr_get_alloced_ptr( 1986 attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE); 1987 inlen = uverbs_attr_get_len(attrs, 1988 MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE); 1989 1990 uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS); 1991 if (!uflow_res) 1992 return -ENOMEM; 1993 1994 len = uverbs_attr_get_uobjs_arr(attrs, 1995 MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions); 1996 for (i = 0; i < len; i++) { 1997 struct mlx5_ib_flow_action *maction = 1998 to_mflow_act(arr_flow_actions[i]->object); 1999 2000 ret = parse_flow_flow_action(maction, false, &flow_act); 2001 if (ret) 2002 goto err_out; 2003 flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE, 2004 arr_flow_actions[i]->object); 2005 } 2006 2007 ret = uverbs_copy_from(&flow_context.flow_tag, attrs, 2008 MLX5_IB_ATTR_CREATE_FLOW_TAG); 2009 if (!ret) { 2010 if (flow_context.flow_tag >= BIT(24)) { 2011 ret = -EINVAL; 2012 goto err_out; 2013 } 2014 flow_context.flags |= FLOW_CONTEXT_HAS_TAG; 2015 } 2016 2017 flow_handler = 2018 raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act, 2019 counter_id, cmd_in, inlen, dest_id, dest_type); 2020 if (IS_ERR(flow_handler)) { 2021 ret = PTR_ERR(flow_handler); 2022 goto err_out; 2023 } 2024 2025 ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res); 2026 2027 return 0; 2028 err_out: 2029 ib_uverbs_flow_resources_free(uflow_res); 2030 return ret; 2031 } 2032 2033 static int flow_matcher_cleanup(struct ib_uobject *uobject, 2034 enum rdma_remove_reason why, 2035 struct uverbs_attr_bundle *attrs) 2036 { 2037 struct mlx5_ib_flow_matcher *obj = uobject->object; 2038 2039 if (atomic_read(&obj->usecnt)) 2040 return -EBUSY; 2041 2042 kfree(obj); 2043 return 0; 2044 } 2045 2046 static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, 2047 struct mlx5_ib_flow_matcher *obj) 2048 { 2049 enum mlx5_ib_uapi_flow_table_type ft_type = 2050 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX; 2051 u32 flags; 2052 int err; 2053 2054 /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older 2055 * users should switch to it. We leave this to not break userspace 2056 */ 2057 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) && 2058 uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) 2059 return -EINVAL; 2060 2061 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) { 2062 err = uverbs_get_const(&ft_type, attrs, 2063 MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE); 2064 if (err) 2065 return err; 2066 2067 err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type); 2068 if (err) 2069 return err; 2070 2071 return 0; 2072 } 2073 2074 if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) { 2075 err = uverbs_get_flags32(&flags, attrs, 2076 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS, 2077 IB_FLOW_ATTR_FLAGS_EGRESS); 2078 if (err) 2079 return err; 2080 2081 if (flags) { 2082 mlx5_ib_ft_type_to_namespace( 2083 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, 2084 &obj->ns_type); 2085 return 0; 2086 } 2087 } 2088 2089 obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS; 2090 2091 return 0; 2092 } 2093 2094 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)( 2095 struct uverbs_attr_bundle *attrs) 2096 { 2097 struct ib_uobject *uobj = uverbs_attr_get_uobject( 2098 attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE); 2099 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); 2100 struct mlx5_ib_flow_matcher *obj; 2101 int err; 2102 2103 obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL); 2104 if (!obj) 2105 return -ENOMEM; 2106 2107 obj->mask_len = uverbs_attr_get_len( 2108 attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK); 2109 err = uverbs_copy_from(&obj->matcher_mask, 2110 attrs, 2111 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK); 2112 if (err) 2113 goto end; 2114 2115 obj->flow_type = uverbs_attr_get_enum_id( 2116 attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE); 2117 2118 if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) { 2119 err = uverbs_copy_from(&obj->priority, 2120 attrs, 2121 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE); 2122 if (err) 2123 goto end; 2124 } 2125 2126 err = uverbs_copy_from(&obj->match_criteria_enable, 2127 attrs, 2128 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA); 2129 if (err) 2130 goto end; 2131 2132 err = mlx5_ib_matcher_ns(attrs, obj); 2133 if (err) 2134 goto end; 2135 2136 uobj->object = obj; 2137 obj->mdev = dev->mdev; 2138 atomic_set(&obj->usecnt, 0); 2139 return 0; 2140 2141 end: 2142 kfree(obj); 2143 return err; 2144 } 2145 2146 static struct ib_flow_action * 2147 mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev, 2148 enum mlx5_ib_uapi_flow_table_type ft_type, 2149 u8 num_actions, void *in) 2150 { 2151 enum mlx5_flow_namespace_type namespace; 2152 struct mlx5_ib_flow_action *maction; 2153 int ret; 2154 2155 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace); 2156 if (ret) 2157 return ERR_PTR(-EINVAL); 2158 2159 maction = kzalloc(sizeof(*maction), GFP_KERNEL); 2160 if (!maction) 2161 return ERR_PTR(-ENOMEM); 2162 2163 maction->flow_action_raw.modify_hdr = 2164 mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in); 2165 2166 if (IS_ERR(maction->flow_action_raw.modify_hdr)) { 2167 ret = PTR_ERR(maction->flow_action_raw.modify_hdr); 2168 kfree(maction); 2169 return ERR_PTR(ret); 2170 } 2171 maction->flow_action_raw.sub_type = 2172 MLX5_IB_FLOW_ACTION_MODIFY_HEADER; 2173 maction->flow_action_raw.dev = dev; 2174 2175 return &maction->ib_action; 2176 } 2177 2178 static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev) 2179 { 2180 return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 2181 max_modify_header_actions) || 2182 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, 2183 max_modify_header_actions) || 2184 MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, 2185 max_modify_header_actions); 2186 } 2187 2188 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)( 2189 struct uverbs_attr_bundle *attrs) 2190 { 2191 struct ib_uobject *uobj = uverbs_attr_get_uobject( 2192 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE); 2193 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); 2194 enum mlx5_ib_uapi_flow_table_type ft_type; 2195 struct ib_flow_action *action; 2196 int num_actions; 2197 void *in; 2198 int ret; 2199 2200 if (!mlx5_ib_modify_header_supported(mdev)) 2201 return -EOPNOTSUPP; 2202 2203 in = uverbs_attr_get_alloced_ptr(attrs, 2204 MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM); 2205 2206 num_actions = uverbs_attr_ptr_get_array_size( 2207 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, 2208 MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)); 2209 if (num_actions < 0) 2210 return num_actions; 2211 2212 ret = uverbs_get_const(&ft_type, attrs, 2213 MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE); 2214 if (ret) 2215 return ret; 2216 action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in); 2217 if (IS_ERR(action)) 2218 return PTR_ERR(action); 2219 2220 uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev, 2221 IB_FLOW_ACTION_UNSPECIFIED); 2222 2223 return 0; 2224 } 2225 2226 static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev, 2227 u8 packet_reformat_type, 2228 u8 ft_type) 2229 { 2230 switch (packet_reformat_type) { 2231 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL: 2232 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX) 2233 return MLX5_CAP_FLOWTABLE(ibdev->mdev, 2234 encap_general_header); 2235 break; 2236 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL: 2237 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX) 2238 return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev, 2239 reformat_l2_to_l3_tunnel); 2240 break; 2241 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2: 2242 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX) 2243 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, 2244 reformat_l3_tunnel_to_l2); 2245 break; 2246 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2: 2247 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX) 2248 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap); 2249 break; 2250 default: 2251 break; 2252 } 2253 2254 return false; 2255 } 2256 2257 static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt) 2258 { 2259 switch (dv_prt) { 2260 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL: 2261 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL; 2262 break; 2263 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2: 2264 *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; 2265 break; 2266 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL: 2267 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL; 2268 break; 2269 default: 2270 return -EINVAL; 2271 } 2272 2273 return 0; 2274 } 2275 2276 static int mlx5_ib_flow_action_create_packet_reformat_ctx( 2277 struct mlx5_ib_dev *dev, 2278 struct mlx5_ib_flow_action *maction, 2279 u8 ft_type, u8 dv_prt, 2280 void *in, size_t len) 2281 { 2282 enum mlx5_flow_namespace_type namespace; 2283 u8 prm_prt; 2284 int ret; 2285 2286 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace); 2287 if (ret) 2288 return ret; 2289 2290 ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt); 2291 if (ret) 2292 return ret; 2293 2294 maction->flow_action_raw.pkt_reformat = 2295 mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len, 2296 in, namespace); 2297 if (IS_ERR(maction->flow_action_raw.pkt_reformat)) { 2298 ret = PTR_ERR(maction->flow_action_raw.pkt_reformat); 2299 return ret; 2300 } 2301 2302 maction->flow_action_raw.sub_type = 2303 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT; 2304 maction->flow_action_raw.dev = dev; 2305 2306 return 0; 2307 } 2308 2309 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)( 2310 struct uverbs_attr_bundle *attrs) 2311 { 2312 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, 2313 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE); 2314 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); 2315 enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt; 2316 enum mlx5_ib_uapi_flow_table_type ft_type; 2317 struct mlx5_ib_flow_action *maction; 2318 int ret; 2319 2320 ret = uverbs_get_const(&ft_type, attrs, 2321 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE); 2322 if (ret) 2323 return ret; 2324 2325 ret = uverbs_get_const(&dv_prt, attrs, 2326 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE); 2327 if (ret) 2328 return ret; 2329 2330 if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type)) 2331 return -EOPNOTSUPP; 2332 2333 maction = kzalloc(sizeof(*maction), GFP_KERNEL); 2334 if (!maction) 2335 return -ENOMEM; 2336 2337 if (dv_prt == 2338 MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) { 2339 maction->flow_action_raw.sub_type = 2340 MLX5_IB_FLOW_ACTION_DECAP; 2341 maction->flow_action_raw.dev = mdev; 2342 } else { 2343 void *in; 2344 int len; 2345 2346 in = uverbs_attr_get_alloced_ptr(attrs, 2347 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF); 2348 if (IS_ERR(in)) { 2349 ret = PTR_ERR(in); 2350 goto free_maction; 2351 } 2352 2353 len = uverbs_attr_get_len(attrs, 2354 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF); 2355 2356 ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev, 2357 maction, ft_type, dv_prt, in, len); 2358 if (ret) 2359 goto free_maction; 2360 } 2361 2362 uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev, 2363 IB_FLOW_ACTION_UNSPECIFIED); 2364 return 0; 2365 2366 free_maction: 2367 kfree(maction); 2368 return ret; 2369 } 2370 2371 DECLARE_UVERBS_NAMED_METHOD( 2372 MLX5_IB_METHOD_CREATE_FLOW, 2373 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE, 2374 UVERBS_OBJECT_FLOW, 2375 UVERBS_ACCESS_NEW, 2376 UA_MANDATORY), 2377 UVERBS_ATTR_PTR_IN( 2378 MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, 2379 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)), 2380 UA_MANDATORY, 2381 UA_ALLOC_AND_COPY), 2382 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER, 2383 MLX5_IB_OBJECT_FLOW_MATCHER, 2384 UVERBS_ACCESS_READ, 2385 UA_MANDATORY), 2386 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP, 2387 UVERBS_OBJECT_QP, 2388 UVERBS_ACCESS_READ), 2389 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, 2390 MLX5_IB_OBJECT_DEVX_OBJ, 2391 UVERBS_ACCESS_READ), 2392 UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, 2393 UVERBS_OBJECT_FLOW_ACTION, 2394 UVERBS_ACCESS_READ, 1, 2395 MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS, 2396 UA_OPTIONAL), 2397 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG, 2398 UVERBS_ATTR_TYPE(u32), 2399 UA_OPTIONAL), 2400 UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, 2401 MLX5_IB_OBJECT_DEVX_OBJ, 2402 UVERBS_ACCESS_READ, 1, 1, 2403 UA_OPTIONAL), 2404 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET, 2405 UVERBS_ATTR_MIN_SIZE(sizeof(u32)), 2406 UA_OPTIONAL, 2407 UA_ALLOC_AND_COPY), 2408 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS, 2409 enum mlx5_ib_create_flow_flags, 2410 UA_OPTIONAL)); 2411 2412 DECLARE_UVERBS_NAMED_METHOD_DESTROY( 2413 MLX5_IB_METHOD_DESTROY_FLOW, 2414 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE, 2415 UVERBS_OBJECT_FLOW, 2416 UVERBS_ACCESS_DESTROY, 2417 UA_MANDATORY)); 2418 2419 ADD_UVERBS_METHODS(mlx5_ib_fs, 2420 UVERBS_OBJECT_FLOW, 2421 &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW), 2422 &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW)); 2423 2424 DECLARE_UVERBS_NAMED_METHOD( 2425 MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER, 2426 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE, 2427 UVERBS_OBJECT_FLOW_ACTION, 2428 UVERBS_ACCESS_NEW, 2429 UA_MANDATORY), 2430 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, 2431 UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES( 2432 set_add_copy_action_in_auto)), 2433 UA_MANDATORY, 2434 UA_ALLOC_AND_COPY), 2435 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE, 2436 enum mlx5_ib_uapi_flow_table_type, 2437 UA_MANDATORY)); 2438 2439 DECLARE_UVERBS_NAMED_METHOD( 2440 MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT, 2441 UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE, 2442 UVERBS_OBJECT_FLOW_ACTION, 2443 UVERBS_ACCESS_NEW, 2444 UA_MANDATORY), 2445 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF, 2446 UVERBS_ATTR_MIN_SIZE(1), 2447 UA_ALLOC_AND_COPY, 2448 UA_OPTIONAL), 2449 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE, 2450 enum mlx5_ib_uapi_flow_action_packet_reformat_type, 2451 UA_MANDATORY), 2452 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE, 2453 enum mlx5_ib_uapi_flow_table_type, 2454 UA_MANDATORY)); 2455 2456 ADD_UVERBS_METHODS( 2457 mlx5_ib_flow_actions, 2458 UVERBS_OBJECT_FLOW_ACTION, 2459 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER), 2460 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)); 2461 2462 DECLARE_UVERBS_NAMED_METHOD( 2463 MLX5_IB_METHOD_FLOW_MATCHER_CREATE, 2464 UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE, 2465 MLX5_IB_OBJECT_FLOW_MATCHER, 2466 UVERBS_ACCESS_NEW, 2467 UA_MANDATORY), 2468 UVERBS_ATTR_PTR_IN( 2469 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, 2470 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)), 2471 UA_MANDATORY), 2472 UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, 2473 mlx5_ib_flow_type, 2474 UA_MANDATORY), 2475 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, 2476 UVERBS_ATTR_TYPE(u8), 2477 UA_MANDATORY), 2478 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS, 2479 enum ib_flow_flags, 2480 UA_OPTIONAL), 2481 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE, 2482 enum mlx5_ib_uapi_flow_table_type, 2483 UA_OPTIONAL)); 2484 2485 DECLARE_UVERBS_NAMED_METHOD_DESTROY( 2486 MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, 2487 UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE, 2488 MLX5_IB_OBJECT_FLOW_MATCHER, 2489 UVERBS_ACCESS_DESTROY, 2490 UA_MANDATORY)); 2491 2492 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER, 2493 UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup), 2494 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE), 2495 &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY)); 2496 2497 const struct uapi_definition mlx5_ib_flow_defs[] = { 2498 UAPI_DEF_CHAIN_OBJ_TREE_NAMED( 2499 MLX5_IB_OBJECT_FLOW_MATCHER), 2500 UAPI_DEF_CHAIN_OBJ_TREE( 2501 UVERBS_OBJECT_FLOW, 2502 &mlx5_ib_fs), 2503 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 2504 &mlx5_ib_flow_actions), 2505 {}, 2506 }; 2507 2508 static const struct ib_device_ops flow_ops = { 2509 .create_flow = mlx5_ib_create_flow, 2510 .destroy_flow = mlx5_ib_destroy_flow, 2511 .destroy_flow_action = mlx5_ib_destroy_flow_action, 2512 }; 2513 2514 static const struct ib_device_ops flow_ipsec_ops = { 2515 .create_flow_action_esp = mlx5_ib_create_flow_action_esp, 2516 .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, 2517 }; 2518 2519 int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) 2520 { 2521 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); 2522 2523 if (!dev->flow_db) 2524 return -ENOMEM; 2525 2526 mutex_init(&dev->flow_db->lock); 2527 2528 ib_set_device_ops(&dev->ib_dev, &flow_ops); 2529 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 2530 MLX5_ACCEL_IPSEC_CAP_DEVICE) 2531 ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops); 2532 2533 return 0; 2534 } 2535