/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/memtype.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct	work;
	union {
		struct mlx5_ib_dev	      *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool			is_slave;
	unsigned int		event;
	void			*param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   u8 *port_num)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}
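
/*
 * Netdevice notifier: keeps roce->netdev in sync on NETDEV_REGISTER /
 * NETDEV_UNREGISTER and, on carrier or state changes, translates the
 * netdev state into IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR events for
 * the matching IB port.
 */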
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev || (!upper && ndev == roce->netdev)) &&
		    ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					   u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 * Use native port in case of reps
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num);
	if (err)
		goto out;
	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	if (gid) {
		gid_type = attr->gid_type;
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}
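
/*
 * Return the minimum RoCE v2 UDP source port advertised by the device for
 * RoCE v2 (UDP encapsulated) GIDs; other GID types have no UDP sport.
 */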
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 byte standard atomic operations and is
	 * capable of responding in host endianness
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw_outlen && uhw_outlen < resp_len)
		return -EINVAL;

	resp.response_length = resp_len;

	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw_outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw_outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
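	/* Max send/recv SGEs are derived from the HCA's maximum WQE sizes:
	 * the RQ side divides the max RQ WQE by the size of a data segment,
	 * while the SQ side subtracts the control and remote-address
	 * segments from a (capped) max SQ WQE before dividing.
	 */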
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	props->max_sgl_rd =
		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
		if (!uhw) {
			/* ODP for kernel QPs is not implemented for receive
			 * WQEs and SRQ WQEs
			 */
			props->odp_caps.per_transport_caps.rc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.uc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.ud_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.xrc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
		}
	}

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (mlx5_core_is_vf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(mdev, tag_matching) &&
	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw_outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw_outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			else
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw_outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}

	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
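
/*
 * Query IB (non-RoCE) port attributes through the HCA vport context and the
 * port link-width/proto/MTU/VL queries.
 */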
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	int ret;

	/* Only link layer == ethernet is valid for representors
	 * and we always use port 1
	 */
	ret = mlx5_query_port_roce(ibdev, port, props);
	if (ret || !props)
		return ret;

	/* We don't support GIDS */
	props->gid_tbl_len = 0;

	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, get the PKey from the master
		 * port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)
		goto out;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		err = -EINVAL;
		goto out;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
						 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* Large page with non 4k uar support might limit the dynamic size */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}

static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     struct mlx5_bfreg_info *bfregi)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	/* This holds the required static allocation asked by the user */
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}

	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;

	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static void deallocate_uars(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;

	if (dev->lb.user_td == 2 ||
	    dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			dev->lb.enabled = true;
		}
	}

	mutex_unlock(&dev->lb.mutex);

	return err;
}

void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td--;
	if (qp)
		dev->lb.qps--;

	if (dev->lb.user_td == 1 &&
	    dev->lb.qps == 0) {
		if (dev->lb.enabled) {
			mlx5_nic_vport_update_local_lb(dev->mdev, false);
			dev->lb.enabled = false;
		}
	}

	mutex_unlock(&dev->lb.mutex);
}

static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
					  u16 uid)
{
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return 0;

	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return err;

	return mlx5_ib_enable_lb(dev, true, false);
}

static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
					     u16 uid)
{
	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return;

	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return;

	mlx5_ib_disable_lb(dev, true, false);
}
static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	u32 dump_fill_mkey;
	bool lib_uar_4k;

	if (!dev->ib_active)
		return -EAGAIN;

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return -EINVAL;

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
		return -EOPNOTSUPP;

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (dev->wc_support)
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_uars;
		context->devx_uid = err;
	}

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_devx;

	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
		if (err)
			goto out_mdev;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = dev->num_ports;

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
		if (mdev->clock_info)
			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
		resp.response_length += sizeof(resp.clock_info_versions);
	}

supports larger pages, let's 1930 * pretend we don't support reading the HCA's core clock. This is also 1931 * forced by mmap function. 1932 */ 1933 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { 1934 if (PAGE_SIZE <= 4096) { 1935 resp.comp_mask |= 1936 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; 1937 resp.hca_core_clock_offset = 1938 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; 1939 } 1940 resp.response_length += sizeof(resp.hca_core_clock_offset); 1941 } 1942 1943 if (field_avail(typeof(resp), log_uar_size, udata->outlen)) 1944 resp.response_length += sizeof(resp.log_uar_size); 1945 1946 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) 1947 resp.response_length += sizeof(resp.num_uars_per_page); 1948 1949 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) { 1950 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; 1951 resp.response_length += sizeof(resp.num_dyn_bfregs); 1952 } 1953 1954 if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) { 1955 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { 1956 resp.dump_fill_mkey = dump_fill_mkey; 1957 resp.comp_mask |= 1958 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY; 1959 } 1960 resp.response_length += sizeof(resp.dump_fill_mkey); 1961 } 1962 1963 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1964 if (err) 1965 goto out_mdev; 1966 1967 bfregi->ver = ver; 1968 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; 1969 context->cqe_version = resp.cqe_version; 1970 context->lib_caps = req.lib_caps; 1971 print_lib_caps(dev, context->lib_caps); 1972 1973 if (dev->lag_active) { 1974 u8 port = mlx5_core_native_port_num(dev->mdev) - 1; 1975 1976 atomic_set(&context->tx_port_affinity, 1977 atomic_add_return( 1978 1, &dev->port[port].roce.tx_port_affinity)); 1979 } 1980 1981 return 0; 1982 1983 out_mdev: 1984 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); 1985 out_devx: 1986 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) 1987 mlx5_ib_devx_destroy(dev, context->devx_uid); 1988 1989 out_uars: 1990 deallocate_uars(dev, context); 1991 1992 out_sys_pages: 1993 kfree(bfregi->sys_pages); 1994 1995 out_count: 1996 kfree(bfregi->count); 1997 1998 out_ctx: 1999 return err; 2000 } 2001 2002 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 2003 { 2004 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 2005 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 2006 struct mlx5_bfreg_info *bfregi; 2007 2008 bfregi = &context->bfregi; 2009 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); 2010 2011 if (context->devx_uid) 2012 mlx5_ib_devx_destroy(dev, context->devx_uid); 2013 2014 deallocate_uars(dev, context); 2015 kfree(bfregi->sys_pages); 2016 kfree(bfregi->count); 2017 } 2018 2019 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, 2020 int uar_idx) 2021 { 2022 int fw_uars_per_page; 2023 2024 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
				MLX5_UARS_IN_PAGE : 1;

	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

/* Index resides in an extra byte to enable larger values than 255 */
static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}


static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	case MLX5_IB_MMAP_DEVICE_MEM:
		return "Device Memory";
	default:
		return NULL;
	}
}

static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
					struct vm_area_struct *vma,
					struct mlx5_ib_ucontext *context)
{
	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
	    !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
		return -EOPNOTSUPP;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	if (!dev->mdev->clock_info)
		return -EOPNOTSUPP;

	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(dev->mdev->clock_info));
}

static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
{
	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
	struct mlx5_ib_dm *mdm;

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
		mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
		mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
				       mdm->size);
		kfree(mdm);
		break;
	default:
		WARN_ON(true);
	}
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn;
	pgprot_t prot;
	u32 bfreg_dyn_idx = 0;
	u32 uar_index;
	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
	int max_valid_idx = dyn_uar ?
bfregi->num_sys_pages : 2124 bfregi->num_static_sys_pages; 2125 2126 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2127 return -EINVAL; 2128 2129 if (dyn_uar) 2130 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; 2131 else 2132 idx = get_index(vma->vm_pgoff); 2133 2134 if (idx >= max_valid_idx) { 2135 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", 2136 idx, max_valid_idx); 2137 return -EINVAL; 2138 } 2139 2140 switch (cmd) { 2141 case MLX5_IB_MMAP_WC_PAGE: 2142 case MLX5_IB_MMAP_ALLOC_WC: 2143 /* Some architectures don't support WC memory */ 2144 #if defined(CONFIG_X86) 2145 if (!pat_enabled()) 2146 return -EPERM; 2147 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 2148 return -EPERM; 2149 #endif 2150 /* fall through */ 2151 case MLX5_IB_MMAP_REGULAR_PAGE: 2152 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 2153 prot = pgprot_writecombine(vma->vm_page_prot); 2154 break; 2155 case MLX5_IB_MMAP_NC_PAGE: 2156 prot = pgprot_noncached(vma->vm_page_prot); 2157 break; 2158 default: 2159 return -EINVAL; 2160 } 2161 2162 if (dyn_uar) { 2163 int uars_per_page; 2164 2165 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); 2166 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); 2167 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { 2168 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", 2169 bfreg_dyn_idx, bfregi->total_num_bfregs); 2170 return -EINVAL; 2171 } 2172 2173 mutex_lock(&bfregi->lock); 2174 /* Fail if uar already allocated, first bfreg index of each 2175 * page holds its count. 2176 */ 2177 if (bfregi->count[bfreg_dyn_idx]) { 2178 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx); 2179 mutex_unlock(&bfregi->lock); 2180 return -EINVAL; 2181 } 2182 2183 bfregi->count[bfreg_dyn_idx]++; 2184 mutex_unlock(&bfregi->lock); 2185 2186 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); 2187 if (err) { 2188 mlx5_ib_warn(dev, "UAR alloc failed\n"); 2189 goto free_bfreg; 2190 } 2191 } else { 2192 uar_index = bfregi->sys_pages[idx]; 2193 } 2194 2195 pfn = uar_index2pfn(dev, uar_index); 2196 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 2197 2198 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE, 2199 prot, NULL); 2200 if (err) { 2201 mlx5_ib_err(dev, 2202 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n", 2203 err, mmap_cmd2str(cmd)); 2204 goto err; 2205 } 2206 2207 if (dyn_uar) 2208 bfregi->sys_pages[idx] = uar_index; 2209 return 0; 2210 2211 err: 2212 if (!dyn_uar) 2213 return err; 2214 2215 mlx5_cmd_free_uar(dev->mdev, idx); 2216 2217 free_bfreg: 2218 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); 2219 2220 return err; 2221 } 2222 2223 static int add_dm_mmap_entry(struct ib_ucontext *context, 2224 struct mlx5_ib_dm *mdm, 2225 u64 address) 2226 { 2227 mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC; 2228 mdm->mentry.address = address; 2229 return rdma_user_mmap_entry_insert_range( 2230 context, &mdm->mentry.rdma_entry, 2231 mdm->size, 2232 MLX5_IB_MMAP_DEVICE_MEM << 16, 2233 (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1); 2234 } 2235 2236 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma) 2237 { 2238 unsigned long idx; 2239 u8 command; 2240 2241 command = get_command(vma->vm_pgoff); 2242 idx = get_extended_index(vma->vm_pgoff); 2243 2244 return (command << 16 | idx); 2245 } 2246 2247 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, 2248 struct vm_area_struct *vma, 2249 struct ib_ucontext *ucontext) 2250 
{ 2251 struct mlx5_user_mmap_entry *mentry; 2252 struct rdma_user_mmap_entry *entry; 2253 unsigned long pgoff; 2254 pgprot_t prot; 2255 phys_addr_t pfn; 2256 int ret; 2257 2258 pgoff = mlx5_vma_to_pgoff(vma); 2259 entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff); 2260 if (!entry) 2261 return -EINVAL; 2262 2263 mentry = to_mmmap(entry); 2264 pfn = (mentry->address >> PAGE_SHIFT); 2265 prot = pgprot_writecombine(vma->vm_page_prot); 2266 ret = rdma_user_mmap_io(ucontext, vma, pfn, 2267 entry->npages * PAGE_SIZE, 2268 prot, 2269 entry); 2270 rdma_user_mmap_entry_put(&mentry->rdma_entry); 2271 return ret; 2272 } 2273 2274 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 2275 { 2276 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 2277 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 2278 unsigned long command; 2279 phys_addr_t pfn; 2280 2281 command = get_command(vma->vm_pgoff); 2282 switch (command) { 2283 case MLX5_IB_MMAP_WC_PAGE: 2284 case MLX5_IB_MMAP_NC_PAGE: 2285 case MLX5_IB_MMAP_REGULAR_PAGE: 2286 case MLX5_IB_MMAP_ALLOC_WC: 2287 return uar_mmap(dev, command, vma, context); 2288 2289 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 2290 return -ENOSYS; 2291 2292 case MLX5_IB_MMAP_CORE_CLOCK: 2293 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2294 return -EINVAL; 2295 2296 if (vma->vm_flags & VM_WRITE) 2297 return -EPERM; 2298 vma->vm_flags &= ~VM_MAYWRITE; 2299 2300 /* Don't expose to user-space information it shouldn't have */ 2301 if (PAGE_SIZE > 4096) 2302 return -EOPNOTSUPP; 2303 2304 pfn = (dev->mdev->iseg_base + 2305 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 2306 PAGE_SHIFT; 2307 return rdma_user_mmap_io(&context->ibucontext, vma, pfn, 2308 PAGE_SIZE, 2309 pgprot_noncached(vma->vm_page_prot), 2310 NULL); 2311 case MLX5_IB_MMAP_CLOCK_INFO: 2312 return mlx5_ib_mmap_clock_info_page(dev, vma, context); 2313 2314 default: 2315 return mlx5_ib_mmap_offset(dev, vma, ibcontext); 2316 } 2317 2318 return 0; 2319 } 2320 2321 static inline int check_dm_type_support(struct mlx5_ib_dev *dev, 2322 u32 type) 2323 { 2324 switch (type) { 2325 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2326 if (!MLX5_CAP_DEV_MEM(dev->mdev, memic)) 2327 return -EOPNOTSUPP; 2328 break; 2329 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2330 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2331 if (!capable(CAP_SYS_RAWIO) || 2332 !capable(CAP_NET_RAW)) 2333 return -EPERM; 2334 2335 if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 2336 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner))) 2337 return -EOPNOTSUPP; 2338 break; 2339 } 2340 2341 return 0; 2342 } 2343 2344 static int handle_alloc_dm_memic(struct ib_ucontext *ctx, 2345 struct mlx5_ib_dm *dm, 2346 struct ib_dm_alloc_attr *attr, 2347 struct uverbs_attr_bundle *attrs) 2348 { 2349 struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; 2350 u64 start_offset; 2351 u16 page_idx; 2352 int err; 2353 u64 address; 2354 2355 dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE); 2356 2357 err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr, 2358 dm->size, attr->alignment); 2359 if (err) 2360 return err; 2361 2362 address = dm->dev_addr & PAGE_MASK; 2363 err = add_dm_mmap_entry(ctx, dm, address); 2364 if (err) 2365 goto err_dealloc; 2366 2367 page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF; 2368 err = uverbs_copy_to(attrs, 2369 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 2370 &page_idx, 2371 sizeof(page_idx)); 2372 if (err) 2373 goto err_copy; 2374 2375 start_offset = dm->dev_addr & ~PAGE_MASK; 2376 err = uverbs_copy_to(attrs, 2377 
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2378 &start_offset, sizeof(start_offset)); 2379 if (err) 2380 goto err_copy; 2381 2382 return 0; 2383 2384 err_copy: 2385 rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry); 2386 err_dealloc: 2387 mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size); 2388 2389 return err; 2390 } 2391 2392 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx, 2393 struct mlx5_ib_dm *dm, 2394 struct ib_dm_alloc_attr *attr, 2395 struct uverbs_attr_bundle *attrs, 2396 int type) 2397 { 2398 struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev; 2399 u64 act_size; 2400 int err; 2401 2402 /* Allocation size must a multiple of the basic block size 2403 * and a power of 2. 2404 */ 2405 act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev)); 2406 act_size = roundup_pow_of_two(act_size); 2407 2408 dm->size = act_size; 2409 err = mlx5_dm_sw_icm_alloc(dev, type, act_size, 2410 to_mucontext(ctx)->devx_uid, &dm->dev_addr, 2411 &dm->icm_dm.obj_id); 2412 if (err) 2413 return err; 2414 2415 err = uverbs_copy_to(attrs, 2416 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2417 &dm->dev_addr, sizeof(dm->dev_addr)); 2418 if (err) 2419 mlx5_dm_sw_icm_dealloc(dev, type, dm->size, 2420 to_mucontext(ctx)->devx_uid, dm->dev_addr, 2421 dm->icm_dm.obj_id); 2422 2423 return err; 2424 } 2425 2426 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, 2427 struct ib_ucontext *context, 2428 struct ib_dm_alloc_attr *attr, 2429 struct uverbs_attr_bundle *attrs) 2430 { 2431 struct mlx5_ib_dm *dm; 2432 enum mlx5_ib_uapi_dm_type type; 2433 int err; 2434 2435 err = uverbs_get_const_default(&type, attrs, 2436 MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE, 2437 MLX5_IB_UAPI_DM_TYPE_MEMIC); 2438 if (err) 2439 return ERR_PTR(err); 2440 2441 mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n", 2442 type, attr->length, attr->alignment); 2443 2444 err = check_dm_type_support(to_mdev(ibdev), type); 2445 if (err) 2446 return ERR_PTR(err); 2447 2448 dm = kzalloc(sizeof(*dm), GFP_KERNEL); 2449 if (!dm) 2450 return ERR_PTR(-ENOMEM); 2451 2452 dm->type = type; 2453 2454 switch (type) { 2455 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2456 err = handle_alloc_dm_memic(context, dm, 2457 attr, 2458 attrs); 2459 break; 2460 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2461 err = handle_alloc_dm_sw_icm(context, dm, 2462 attr, attrs, 2463 MLX5_SW_ICM_TYPE_STEERING); 2464 break; 2465 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2466 err = handle_alloc_dm_sw_icm(context, dm, 2467 attr, attrs, 2468 MLX5_SW_ICM_TYPE_HEADER_MODIFY); 2469 break; 2470 default: 2471 err = -EOPNOTSUPP; 2472 } 2473 2474 if (err) 2475 goto err_free; 2476 2477 return &dm->ibdm; 2478 2479 err_free: 2480 kfree(dm); 2481 return ERR_PTR(err); 2482 } 2483 2484 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) 2485 { 2486 struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context( 2487 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); 2488 struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev; 2489 struct mlx5_ib_dm *dm = to_mdm(ibdm); 2490 int ret; 2491 2492 switch (dm->type) { 2493 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2494 rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry); 2495 return 0; 2496 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2497 ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, 2498 dm->size, ctx->devx_uid, dm->dev_addr, 2499 dm->icm_dm.obj_id); 2500 if (ret) 2501 return ret; 2502 break; 2503 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2504 ret = 
mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY, 2505 dm->size, ctx->devx_uid, dm->dev_addr, 2506 dm->icm_dm.obj_id); 2507 if (ret) 2508 return ret; 2509 break; 2510 default: 2511 return -EOPNOTSUPP; 2512 } 2513 2514 kfree(dm); 2515 2516 return 0; 2517 } 2518 2519 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 2520 { 2521 struct mlx5_ib_pd *pd = to_mpd(ibpd); 2522 struct ib_device *ibdev = ibpd->device; 2523 struct mlx5_ib_alloc_pd_resp resp; 2524 int err; 2525 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; 2526 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; 2527 u16 uid = 0; 2528 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( 2529 udata, struct mlx5_ib_ucontext, ibucontext); 2530 2531 uid = context ? context->devx_uid : 0; 2532 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); 2533 MLX5_SET(alloc_pd_in, in, uid, uid); 2534 err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), 2535 out, sizeof(out)); 2536 if (err) 2537 return err; 2538 2539 pd->pdn = MLX5_GET(alloc_pd_out, out, pd); 2540 pd->uid = uid; 2541 if (udata) { 2542 resp.pdn = pd->pdn; 2543 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 2544 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); 2545 return -EFAULT; 2546 } 2547 } 2548 2549 return 0; 2550 } 2551 2552 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) 2553 { 2554 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 2555 struct mlx5_ib_pd *mpd = to_mpd(pd); 2556 2557 mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); 2558 } 2559 2560 enum { 2561 MATCH_CRITERIA_ENABLE_OUTER_BIT, 2562 MATCH_CRITERIA_ENABLE_MISC_BIT, 2563 MATCH_CRITERIA_ENABLE_INNER_BIT, 2564 MATCH_CRITERIA_ENABLE_MISC2_BIT 2565 }; 2566 2567 #define HEADER_IS_ZERO(match_criteria, headers) \ 2568 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 2569 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 2570 2571 static u8 get_match_criteria_enable(u32 *match_criteria) 2572 { 2573 u8 match_criteria_enable; 2574 2575 match_criteria_enable = 2576 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 2577 MATCH_CRITERIA_ENABLE_OUTER_BIT; 2578 match_criteria_enable |= 2579 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 2580 MATCH_CRITERIA_ENABLE_MISC_BIT; 2581 match_criteria_enable |= 2582 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 2583 MATCH_CRITERIA_ENABLE_INNER_BIT; 2584 match_criteria_enable |= 2585 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 2586 MATCH_CRITERIA_ENABLE_MISC2_BIT; 2587 2588 return match_criteria_enable; 2589 } 2590 2591 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 2592 { 2593 u8 entry_mask; 2594 u8 entry_val; 2595 int err = 0; 2596 2597 if (!mask) 2598 goto out; 2599 2600 entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, 2601 ip_protocol); 2602 entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, 2603 ip_protocol); 2604 if (!entry_mask) { 2605 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 2606 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2607 goto out; 2608 } 2609 /* Don't override existing ip protocol */ 2610 if (mask != entry_mask || val != entry_val) 2611 err = -EINVAL; 2612 out: 2613 return err; 2614 } 2615 2616 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 2617 bool inner) 2618 { 2619 if (inner) { 2620 MLX5_SET(fte_match_set_misc, 2621 misc_c, inner_ipv6_flow_label, mask); 2622 MLX5_SET(fte_match_set_misc, 2623 misc_v, inner_ipv6_flow_label, val); 2624 } else { 2625 
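		/* non-inner spec: match on the outer IPv6 flow label */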
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
		return -EOPNOTSUPP;

	return 0;
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action)
{

	switch (maction->ib_action.type) {
	case IB_FLOW_ACTION_ESP:
		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
			return -EINVAL;
		/* Currently only AES_GCM keymat is supported by the driver */
		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
		action->action |= is_egress ?
2692 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 2693 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 2694 return 0; 2695 case IB_FLOW_ACTION_UNSPECIFIED: 2696 if (maction->flow_action_raw.sub_type == 2697 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { 2698 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2699 return -EINVAL; 2700 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2701 action->modify_hdr = 2702 maction->flow_action_raw.modify_hdr; 2703 return 0; 2704 } 2705 if (maction->flow_action_raw.sub_type == 2706 MLX5_IB_FLOW_ACTION_DECAP) { 2707 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2708 return -EINVAL; 2709 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 2710 return 0; 2711 } 2712 if (maction->flow_action_raw.sub_type == 2713 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) { 2714 if (action->action & 2715 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) 2716 return -EINVAL; 2717 action->action |= 2718 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 2719 action->pkt_reformat = 2720 maction->flow_action_raw.pkt_reformat; 2721 return 0; 2722 } 2723 /* fall through */ 2724 default: 2725 return -EOPNOTSUPP; 2726 } 2727 } 2728 2729 static int parse_flow_attr(struct mlx5_core_dev *mdev, 2730 struct mlx5_flow_spec *spec, 2731 const union ib_flow_spec *ib_spec, 2732 const struct ib_flow_attr *flow_attr, 2733 struct mlx5_flow_act *action, u32 prev_type) 2734 { 2735 struct mlx5_flow_context *flow_context = &spec->flow_context; 2736 u32 *match_c = spec->match_criteria; 2737 u32 *match_v = spec->match_value; 2738 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 2739 misc_parameters); 2740 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 2741 misc_parameters); 2742 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 2743 misc_parameters_2); 2744 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 2745 misc_parameters_2); 2746 void *headers_c; 2747 void *headers_v; 2748 int match_ipv; 2749 int ret; 2750 2751 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2752 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2753 inner_headers); 2754 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2755 inner_headers); 2756 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2757 ft_field_support.inner_ip_version); 2758 } else { 2759 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2760 outer_headers); 2761 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2762 outer_headers); 2763 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2764 ft_field_support.outer_ip_version); 2765 } 2766 2767 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2768 case IB_FLOW_SPEC_ETH: 2769 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 2770 return -EOPNOTSUPP; 2771 2772 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2773 dmac_47_16), 2774 ib_spec->eth.mask.dst_mac); 2775 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2776 dmac_47_16), 2777 ib_spec->eth.val.dst_mac); 2778 2779 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2780 smac_47_16), 2781 ib_spec->eth.mask.src_mac); 2782 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2783 smac_47_16), 2784 ib_spec->eth.val.src_mac); 2785 2786 if (ib_spec->eth.mask.vlan_tag) { 2787 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2788 cvlan_tag, 1); 2789 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2790 cvlan_tag, 1); 2791 2792 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2793 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 2794 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2795 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 2796 2797 
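			/*
			 * The 802.1Q TCI packs the VID in bits 0-11, CFI/DEI in
			 * bit 12 and PCP in bits 13-15, hence the >> 12 and
			 * >> 13 shifts used to extract those fields below.
			 */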
MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2798 first_cfi, 2799 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 2800 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2801 first_cfi, 2802 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 2803 2804 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2805 first_prio, 2806 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2808 first_prio, 2809 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 2810 } 2811 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2812 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 2813 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2814 ethertype, ntohs(ib_spec->eth.val.ether_type)); 2815 break; 2816 case IB_FLOW_SPEC_IPV4: 2817 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 2818 return -EOPNOTSUPP; 2819 2820 if (match_ipv) { 2821 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2822 ip_version, 0xf); 2823 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2824 ip_version, MLX5_FS_IPV4_VERSION); 2825 } else { 2826 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2827 ethertype, 0xffff); 2828 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2829 ethertype, ETH_P_IP); 2830 } 2831 2832 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2833 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2834 &ib_spec->ipv4.mask.src_ip, 2835 sizeof(ib_spec->ipv4.mask.src_ip)); 2836 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2837 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2838 &ib_spec->ipv4.val.src_ip, 2839 sizeof(ib_spec->ipv4.val.src_ip)); 2840 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2841 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2842 &ib_spec->ipv4.mask.dst_ip, 2843 sizeof(ib_spec->ipv4.mask.dst_ip)); 2844 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2845 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2846 &ib_spec->ipv4.val.dst_ip, 2847 sizeof(ib_spec->ipv4.val.dst_ip)); 2848 2849 set_tos(headers_c, headers_v, 2850 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 2851 2852 if (set_proto(headers_c, headers_v, 2853 ib_spec->ipv4.mask.proto, 2854 ib_spec->ipv4.val.proto)) 2855 return -EINVAL; 2856 break; 2857 case IB_FLOW_SPEC_IPV6: 2858 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 2859 return -EOPNOTSUPP; 2860 2861 if (match_ipv) { 2862 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2863 ip_version, 0xf); 2864 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2865 ip_version, MLX5_FS_IPV6_VERSION); 2866 } else { 2867 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2868 ethertype, 0xffff); 2869 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2870 ethertype, ETH_P_IPV6); 2871 } 2872 2873 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2874 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2875 &ib_spec->ipv6.mask.src_ip, 2876 sizeof(ib_spec->ipv6.mask.src_ip)); 2877 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2878 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2879 &ib_spec->ipv6.val.src_ip, 2880 sizeof(ib_spec->ipv6.val.src_ip)); 2881 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2882 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2883 &ib_spec->ipv6.mask.dst_ip, 2884 sizeof(ib_spec->ipv6.mask.dst_ip)); 2885 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2886 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2887 &ib_spec->ipv6.val.dst_ip, 2888 sizeof(ib_spec->ipv6.val.dst_ip)); 2889 2890 set_tos(headers_c, headers_v, 2891 ib_spec->ipv6.mask.traffic_class, 2892 ib_spec->ipv6.val.traffic_class); 2893 2894 if (set_proto(headers_c, headers_v, 2895 ib_spec->ipv6.mask.next_hdr, 2896 ib_spec->ipv6.val.next_hdr)) 2897 return -EINVAL; 2898 2899 set_flow_label(misc_params_c, 
misc_params_v, 2900 ntohl(ib_spec->ipv6.mask.flow_label), 2901 ntohl(ib_spec->ipv6.val.flow_label), 2902 ib_spec->type & IB_FLOW_SPEC_INNER); 2903 break; 2904 case IB_FLOW_SPEC_ESP: 2905 if (ib_spec->esp.mask.seq) 2906 return -EOPNOTSUPP; 2907 2908 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 2909 ntohl(ib_spec->esp.mask.spi)); 2910 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 2911 ntohl(ib_spec->esp.val.spi)); 2912 break; 2913 case IB_FLOW_SPEC_TCP: 2914 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2915 LAST_TCP_UDP_FIELD)) 2916 return -EOPNOTSUPP; 2917 2918 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) 2919 return -EINVAL; 2920 2921 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 2922 ntohs(ib_spec->tcp_udp.mask.src_port)); 2923 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 2924 ntohs(ib_spec->tcp_udp.val.src_port)); 2925 2926 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 2927 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2928 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 2929 ntohs(ib_spec->tcp_udp.val.dst_port)); 2930 break; 2931 case IB_FLOW_SPEC_UDP: 2932 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2933 LAST_TCP_UDP_FIELD)) 2934 return -EOPNOTSUPP; 2935 2936 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) 2937 return -EINVAL; 2938 2939 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 2940 ntohs(ib_spec->tcp_udp.mask.src_port)); 2941 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 2942 ntohs(ib_spec->tcp_udp.val.src_port)); 2943 2944 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 2945 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2946 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 2947 ntohs(ib_spec->tcp_udp.val.dst_port)); 2948 break; 2949 case IB_FLOW_SPEC_GRE: 2950 if (ib_spec->gre.mask.c_ks_res0_ver) 2951 return -EOPNOTSUPP; 2952 2953 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) 2954 return -EINVAL; 2955 2956 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2957 0xff); 2958 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2959 IPPROTO_GRE); 2960 2961 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 2962 ntohs(ib_spec->gre.mask.protocol)); 2963 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 2964 ntohs(ib_spec->gre.val.protocol)); 2965 2966 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 2967 gre_key.nvgre.hi), 2968 &ib_spec->gre.mask.key, 2969 sizeof(ib_spec->gre.mask.key)); 2970 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 2971 gre_key.nvgre.hi), 2972 &ib_spec->gre.val.key, 2973 sizeof(ib_spec->gre.val.key)); 2974 break; 2975 case IB_FLOW_SPEC_MPLS: 2976 switch (prev_type) { 2977 case IB_FLOW_SPEC_UDP: 2978 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2979 ft_field_support.outer_first_mpls_over_udp), 2980 &ib_spec->mpls.mask.tag)) 2981 return -EOPNOTSUPP; 2982 2983 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2984 outer_first_mpls_over_udp), 2985 &ib_spec->mpls.val.tag, 2986 sizeof(ib_spec->mpls.val.tag)); 2987 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2988 outer_first_mpls_over_udp), 2989 &ib_spec->mpls.mask.tag, 2990 sizeof(ib_spec->mpls.mask.tag)); 2991 break; 2992 case IB_FLOW_SPEC_GRE: 2993 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2994 ft_field_support.outer_first_mpls_over_gre), 2995 &ib_spec->mpls.mask.tag)) 2996 return -EOPNOTSUPP; 2997 2998 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2999 outer_first_mpls_over_gre), 3000 &ib_spec->mpls.val.tag, 
3001 sizeof(ib_spec->mpls.val.tag)); 3002 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 3003 outer_first_mpls_over_gre), 3004 &ib_spec->mpls.mask.tag, 3005 sizeof(ib_spec->mpls.mask.tag)); 3006 break; 3007 default: 3008 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 3009 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 3010 ft_field_support.inner_first_mpls), 3011 &ib_spec->mpls.mask.tag)) 3012 return -EOPNOTSUPP; 3013 3014 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 3015 inner_first_mpls), 3016 &ib_spec->mpls.val.tag, 3017 sizeof(ib_spec->mpls.val.tag)); 3018 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 3019 inner_first_mpls), 3020 &ib_spec->mpls.mask.tag, 3021 sizeof(ib_spec->mpls.mask.tag)); 3022 } else { 3023 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 3024 ft_field_support.outer_first_mpls), 3025 &ib_spec->mpls.mask.tag)) 3026 return -EOPNOTSUPP; 3027 3028 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 3029 outer_first_mpls), 3030 &ib_spec->mpls.val.tag, 3031 sizeof(ib_spec->mpls.val.tag)); 3032 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 3033 outer_first_mpls), 3034 &ib_spec->mpls.mask.tag, 3035 sizeof(ib_spec->mpls.mask.tag)); 3036 } 3037 } 3038 break; 3039 case IB_FLOW_SPEC_VXLAN_TUNNEL: 3040 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 3041 LAST_TUNNEL_FIELD)) 3042 return -EOPNOTSUPP; 3043 3044 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 3045 ntohl(ib_spec->tunnel.mask.tunnel_id)); 3046 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 3047 ntohl(ib_spec->tunnel.val.tunnel_id)); 3048 break; 3049 case IB_FLOW_SPEC_ACTION_TAG: 3050 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 3051 LAST_FLOW_TAG_FIELD)) 3052 return -EOPNOTSUPP; 3053 if (ib_spec->flow_tag.tag_id >= BIT(24)) 3054 return -EINVAL; 3055 3056 flow_context->flow_tag = ib_spec->flow_tag.tag_id; 3057 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 3058 break; 3059 case IB_FLOW_SPEC_ACTION_DROP: 3060 if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 3061 LAST_DROP_FIELD)) 3062 return -EOPNOTSUPP; 3063 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 3064 break; 3065 case IB_FLOW_SPEC_ACTION_HANDLE: 3066 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act), 3067 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action); 3068 if (ret) 3069 return ret; 3070 break; 3071 case IB_FLOW_SPEC_ACTION_COUNT: 3072 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 3073 LAST_COUNTERS_FIELD)) 3074 return -EOPNOTSUPP; 3075 3076 /* for now support only one counters spec per flow */ 3077 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 3078 return -EINVAL; 3079 3080 action->counters = ib_spec->flow_count.counters; 3081 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3082 break; 3083 default: 3084 return -EINVAL; 3085 } 3086 3087 return 0; 3088 } 3089 3090 /* If a flow could catch both multicast and unicast packets, 3091 * it won't fall into the multicast flow steering table and this rule 3092 * could steal other multicast packets. 
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

enum valid_spec {
	VALID_SPEC_INVALID,
	VALID_SPEC_VALID,
	VALID_SPEC_NA,
};

static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
		     const struct mlx5_flow_spec *spec,
		     const struct mlx5_flow_act *flow_act,
		     bool egress)
{
	const u32 *match_c = spec->match_criteria;
	bool is_crypto =
		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

	/*
	 * Currently only crypto is supported in egress; once regular egress
	 * rules are supported, always return VALID_SPEC_NA.
	 */
	if (!is_crypto)
		return VALID_SPEC_NA;

	return is_crypto && is_ipsec &&
	       (!egress || (!is_drop &&
			    !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
		VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
			  const struct mlx5_flow_spec *spec,
			  const struct mlx5_flow_act *flow_act,
			  bool egress)
{
	/* We currently only support IPsec egress flows */
	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
				ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
				ft_field_support.outer_ip_version);
	int inner_bit = check_inner ?
IB_FLOW_SPEC_INNER : 0; 3176 bool ipv4_spec_valid, ipv6_spec_valid; 3177 unsigned int ip_spec_type = 0; 3178 bool has_ethertype = false; 3179 unsigned int spec_index; 3180 bool mask_valid = true; 3181 u16 eth_type = 0; 3182 bool type_valid; 3183 3184 /* Validate that ethertype is correct */ 3185 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3186 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 3187 ib_spec->eth.mask.ether_type) { 3188 mask_valid = (ib_spec->eth.mask.ether_type == 3189 htons(0xffff)); 3190 has_ethertype = true; 3191 eth_type = ntohs(ib_spec->eth.val.ether_type); 3192 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 3193 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 3194 ip_spec_type = ib_spec->type; 3195 } 3196 ib_spec = (void *)ib_spec + ib_spec->size; 3197 } 3198 3199 type_valid = (!has_ethertype) || (!ip_spec_type); 3200 if (!type_valid && mask_valid) { 3201 ipv4_spec_valid = (eth_type == ETH_P_IP) && 3202 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 3203 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 3204 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 3205 3206 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 3207 (((eth_type == ETH_P_MPLS_UC) || 3208 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 3209 } 3210 3211 return type_valid; 3212 } 3213 3214 static bool is_valid_attr(struct mlx5_core_dev *mdev, 3215 const struct ib_flow_attr *flow_attr) 3216 { 3217 return is_valid_ethertype(mdev, flow_attr, false) && 3218 is_valid_ethertype(mdev, flow_attr, true); 3219 } 3220 3221 static void put_flow_table(struct mlx5_ib_dev *dev, 3222 struct mlx5_ib_flow_prio *prio, bool ft_added) 3223 { 3224 prio->refcount -= !!ft_added; 3225 if (!prio->refcount) { 3226 mlx5_destroy_flow_table(prio->flow_table); 3227 prio->flow_table = NULL; 3228 } 3229 } 3230 3231 static void counters_clear_description(struct ib_counters *counters) 3232 { 3233 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3234 3235 mutex_lock(&mcounters->mcntrs_mutex); 3236 kfree(mcounters->counters_data); 3237 mcounters->counters_data = NULL; 3238 mcounters->cntrs_max_index = 0; 3239 mutex_unlock(&mcounters->mcntrs_mutex); 3240 } 3241 3242 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 3243 { 3244 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 3245 struct mlx5_ib_flow_handler, 3246 ibflow); 3247 struct mlx5_ib_flow_handler *iter, *tmp; 3248 struct mlx5_ib_dev *dev = handler->dev; 3249 3250 mutex_lock(&dev->flow_db->lock); 3251 3252 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 3253 mlx5_del_flow_rules(iter->rule); 3254 put_flow_table(dev, iter->prio, true); 3255 list_del(&iter->list); 3256 kfree(iter); 3257 } 3258 3259 mlx5_del_flow_rules(handler->rule); 3260 put_flow_table(dev, handler->prio, true); 3261 if (handler->ibcounters && 3262 atomic_read(&handler->ibcounters->usecnt) == 1) 3263 counters_clear_description(handler->ibcounters); 3264 3265 mutex_unlock(&dev->flow_db->lock); 3266 if (handler->flow_matcher) 3267 atomic_dec(&handler->flow_matcher->usecnt); 3268 kfree(handler); 3269 3270 return 0; 3271 } 3272 3273 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 3274 { 3275 priority *= 2; 3276 if (!dont_trap) 3277 priority++; 3278 return priority; 3279 } 3280 3281 enum flow_table_type { 3282 MLX5_IB_FT_RX, 3283 MLX5_IB_FT_TX 3284 }; 3285 3286 #define MLX5_FS_MAX_TYPES 6 3287 #define MLX5_FS_MAX_ENTRIES BIT(16) 3288 3289 static struct mlx5_ib_flow_prio *_get_prio(struct 
mlx5_flow_namespace *ns, 3290 struct mlx5_ib_flow_prio *prio, 3291 int priority, 3292 int num_entries, int num_groups, 3293 u32 flags) 3294 { 3295 struct mlx5_flow_table_attr ft_attr = {}; 3296 struct mlx5_flow_table *ft; 3297 3298 ft_attr.prio = priority; 3299 ft_attr.max_fte = num_entries; 3300 ft_attr.flags = flags; 3301 ft_attr.autogroup.max_num_groups = num_groups; 3302 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); 3303 if (IS_ERR(ft)) 3304 return ERR_CAST(ft); 3305 3306 prio->flow_table = ft; 3307 prio->refcount = 0; 3308 return prio; 3309 } 3310 3311 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 3312 struct ib_flow_attr *flow_attr, 3313 enum flow_table_type ft_type) 3314 { 3315 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 3316 struct mlx5_flow_namespace *ns = NULL; 3317 struct mlx5_ib_flow_prio *prio; 3318 struct mlx5_flow_table *ft; 3319 int max_table_size; 3320 int num_entries; 3321 int num_groups; 3322 bool esw_encap; 3323 u32 flags = 0; 3324 int priority; 3325 3326 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3327 log_max_ft_size)); 3328 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 3329 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 3330 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3331 enum mlx5_flow_namespace_type fn_type; 3332 3333 if (flow_is_multicast_only(flow_attr) && 3334 !dont_trap) 3335 priority = MLX5_IB_FLOW_MCAST_PRIO; 3336 else 3337 priority = ib_prio_to_core_prio(flow_attr->priority, 3338 dont_trap); 3339 if (ft_type == MLX5_IB_FT_RX) { 3340 fn_type = MLX5_FLOW_NAMESPACE_BYPASS; 3341 prio = &dev->flow_db->prios[priority]; 3342 if (!dev->is_rep && !esw_encap && 3343 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) 3344 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3345 if (!dev->is_rep && !esw_encap && 3346 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3347 reformat_l3_tunnel_to_l2)) 3348 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3349 } else { 3350 max_table_size = 3351 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, 3352 log_max_ft_size)); 3353 fn_type = MLX5_FLOW_NAMESPACE_EGRESS; 3354 prio = &dev->flow_db->egress_prios[priority]; 3355 if (!dev->is_rep && !esw_encap && 3356 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) 3357 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3358 } 3359 ns = mlx5_get_flow_namespace(dev->mdev, fn_type); 3360 num_entries = MLX5_FS_MAX_ENTRIES; 3361 num_groups = MLX5_FS_MAX_TYPES; 3362 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3363 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3364 ns = mlx5_get_flow_namespace(dev->mdev, 3365 MLX5_FLOW_NAMESPACE_LEFTOVERS); 3366 build_leftovers_ft_param(&priority, 3367 &num_entries, 3368 &num_groups); 3369 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 3370 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3371 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 3372 allow_sniffer_and_nic_rx_shared_tir)) 3373 return ERR_PTR(-ENOTSUPP); 3374 3375 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 
3376 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 3377 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 3378 3379 prio = &dev->flow_db->sniffer[ft_type]; 3380 priority = 0; 3381 num_entries = 1; 3382 num_groups = 1; 3383 } 3384 3385 if (!ns) 3386 return ERR_PTR(-ENOTSUPP); 3387 3388 max_table_size = min_t(int, num_entries, max_table_size); 3389 3390 ft = prio->flow_table; 3391 if (!ft) 3392 return _get_prio(ns, prio, priority, max_table_size, num_groups, 3393 flags); 3394 3395 return prio; 3396 } 3397 3398 static void set_underlay_qp(struct mlx5_ib_dev *dev, 3399 struct mlx5_flow_spec *spec, 3400 u32 underlay_qpn) 3401 { 3402 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 3403 spec->match_criteria, 3404 misc_parameters); 3405 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3406 misc_parameters); 3407 3408 if (underlay_qpn && 3409 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3410 ft_field_support.bth_dst_qp)) { 3411 MLX5_SET(fte_match_set_misc, 3412 misc_params_v, bth_dst_qp, underlay_qpn); 3413 MLX5_SET(fte_match_set_misc, 3414 misc_params_c, bth_dst_qp, 0xffffff); 3415 } 3416 } 3417 3418 static int read_flow_counters(struct ib_device *ibdev, 3419 struct mlx5_read_counters_attr *read_attr) 3420 { 3421 struct mlx5_fc *fc = read_attr->hw_cntrs_hndl; 3422 struct mlx5_ib_dev *dev = to_mdev(ibdev); 3423 3424 return mlx5_fc_query(dev->mdev, fc, 3425 &read_attr->out[IB_COUNTER_PACKETS], 3426 &read_attr->out[IB_COUNTER_BYTES]); 3427 } 3428 3429 /* flow counters currently expose two counters packets and bytes */ 3430 #define FLOW_COUNTERS_NUM 2 3431 static int counters_set_description(struct ib_counters *counters, 3432 enum mlx5_ib_counters_type counters_type, 3433 struct mlx5_ib_flow_counters_desc *desc_data, 3434 u32 ncounters) 3435 { 3436 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3437 u32 cntrs_max_index = 0; 3438 int i; 3439 3440 if (counters_type != MLX5_IB_COUNTERS_FLOW) 3441 return -EINVAL; 3442 3443 /* init the fields for the object */ 3444 mcounters->type = counters_type; 3445 mcounters->read_counters = read_flow_counters; 3446 mcounters->counters_num = FLOW_COUNTERS_NUM; 3447 mcounters->ncounters = ncounters; 3448 /* each counter entry have both description and index pair */ 3449 for (i = 0; i < ncounters; i++) { 3450 if (desc_data[i].description > IB_COUNTER_BYTES) 3451 return -EINVAL; 3452 3453 if (cntrs_max_index <= desc_data[i].index) 3454 cntrs_max_index = desc_data[i].index + 1; 3455 } 3456 3457 mutex_lock(&mcounters->mcntrs_mutex); 3458 mcounters->counters_data = desc_data; 3459 mcounters->cntrs_max_index = cntrs_max_index; 3460 mutex_unlock(&mcounters->mcntrs_mutex); 3461 3462 return 0; 3463 } 3464 3465 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2)) 3466 static int flow_counters_set_data(struct ib_counters *ibcounters, 3467 struct mlx5_ib_create_flow *ucmd) 3468 { 3469 struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters); 3470 struct mlx5_ib_flow_counters_data *cntrs_data = NULL; 3471 struct mlx5_ib_flow_counters_desc *desc_data = NULL; 3472 bool hw_hndl = false; 3473 int ret = 0; 3474 3475 if (ucmd && ucmd->ncounters_data != 0) { 3476 cntrs_data = ucmd->data; 3477 if (cntrs_data->ncounters > MAX_COUNTERS_NUM) 3478 return -EINVAL; 3479 3480 desc_data = kcalloc(cntrs_data->ncounters, 3481 sizeof(*desc_data), 3482 GFP_KERNEL); 3483 if (!desc_data) 3484 return -ENOMEM; 3485 3486 if (copy_from_user(desc_data, 3487 u64_to_user_ptr(cntrs_data->counters_data), 3488 sizeof(*desc_data) * cntrs_data->ncounters)) { 3489 ret = -EFAULT; 3490 goto free; 3491 } 
3492 } 3493 3494 if (!mcounters->hw_cntrs_hndl) { 3495 mcounters->hw_cntrs_hndl = mlx5_fc_create( 3496 to_mdev(ibcounters->device)->mdev, false); 3497 if (IS_ERR(mcounters->hw_cntrs_hndl)) { 3498 ret = PTR_ERR(mcounters->hw_cntrs_hndl); 3499 goto free; 3500 } 3501 hw_hndl = true; 3502 } 3503 3504 if (desc_data) { 3505 /* counters already bound to at least one flow */ 3506 if (mcounters->cntrs_max_index) { 3507 ret = -EINVAL; 3508 goto free_hndl; 3509 } 3510 3511 ret = counters_set_description(ibcounters, 3512 MLX5_IB_COUNTERS_FLOW, 3513 desc_data, 3514 cntrs_data->ncounters); 3515 if (ret) 3516 goto free_hndl; 3517 3518 } else if (!mcounters->cntrs_max_index) { 3519 /* counters not bound yet, must have udata passed */ 3520 ret = -EINVAL; 3521 goto free_hndl; 3522 } 3523 3524 return 0; 3525 3526 free_hndl: 3527 if (hw_hndl) { 3528 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev, 3529 mcounters->hw_cntrs_hndl); 3530 mcounters->hw_cntrs_hndl = NULL; 3531 } 3532 free: 3533 kfree(desc_data); 3534 return ret; 3535 } 3536 3537 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, 3538 struct mlx5_flow_spec *spec, 3539 struct mlx5_eswitch_rep *rep) 3540 { 3541 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 3542 void *misc; 3543 3544 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 3545 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3546 misc_parameters_2); 3547 3548 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 3549 mlx5_eswitch_get_vport_metadata_for_match(esw, 3550 rep->vport)); 3551 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3552 misc_parameters_2); 3553 3554 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 3555 } else { 3556 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3557 misc_parameters); 3558 3559 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); 3560 3561 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3562 misc_parameters); 3563 3564 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 3565 } 3566 } 3567 3568 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 3569 struct mlx5_ib_flow_prio *ft_prio, 3570 const struct ib_flow_attr *flow_attr, 3571 struct mlx5_flow_destination *dst, 3572 u32 underlay_qpn, 3573 struct mlx5_ib_create_flow *ucmd) 3574 { 3575 struct mlx5_flow_table *ft = ft_prio->flow_table; 3576 struct mlx5_ib_flow_handler *handler; 3577 struct mlx5_flow_act flow_act = {}; 3578 struct mlx5_flow_spec *spec; 3579 struct mlx5_flow_destination dest_arr[2] = {}; 3580 struct mlx5_flow_destination *rule_dst = dest_arr; 3581 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 3582 unsigned int spec_index; 3583 u32 prev_type = 0; 3584 int err = 0; 3585 int dest_num = 0; 3586 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3587 3588 if (!is_valid_attr(dev->mdev, flow_attr)) 3589 return ERR_PTR(-EINVAL); 3590 3591 if (dev->is_rep && is_egress) 3592 return ERR_PTR(-EINVAL); 3593 3594 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3595 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 3596 if (!handler || !spec) { 3597 err = -ENOMEM; 3598 goto free; 3599 } 3600 3601 INIT_LIST_HEAD(&handler->list); 3602 3603 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3604 err = parse_flow_attr(dev->mdev, spec, 3605 ib_flow, flow_attr, &flow_act, 3606 prev_type); 3607 if (err < 0) 3608 goto free; 3609 3610 prev_type = ((union ib_flow_spec *)ib_flow)->type; 3611 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 3612 } 
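	/*
	 * All user specs have been parsed into the match criteria and flow
	 * action. What follows builds the destination list, adds a match on
	 * the underlay QPN for non-multicast rules, and, on representor
	 * devices, adds the source-vport (or metadata) match.
	 */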
3613 3614 if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { 3615 memcpy(&dest_arr[0], dst, sizeof(*dst)); 3616 dest_num++; 3617 } 3618 3619 if (!flow_is_multicast_only(flow_attr)) 3620 set_underlay_qp(dev, spec, underlay_qpn); 3621 3622 if (dev->is_rep) { 3623 struct mlx5_eswitch_rep *rep; 3624 3625 rep = dev->port[flow_attr->port - 1].rep; 3626 if (!rep) { 3627 err = -EINVAL; 3628 goto free; 3629 } 3630 3631 mlx5_ib_set_rule_source_port(dev, spec, rep); 3632 } 3633 3634 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 3635 3636 if (is_egress && 3637 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 3638 err = -EINVAL; 3639 goto free; 3640 } 3641 3642 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 3643 struct mlx5_ib_mcounters *mcounters; 3644 3645 err = flow_counters_set_data(flow_act.counters, ucmd); 3646 if (err) 3647 goto free; 3648 3649 mcounters = to_mcounters(flow_act.counters); 3650 handler->ibcounters = flow_act.counters; 3651 dest_arr[dest_num].type = 3652 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 3653 dest_arr[dest_num].counter_id = 3654 mlx5_fc_id(mcounters->hw_cntrs_hndl); 3655 dest_num++; 3656 } 3657 3658 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3659 if (!dest_num) 3660 rule_dst = NULL; 3661 } else { 3662 if (is_egress) 3663 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3664 else 3665 flow_act.action |= 3666 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 3667 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 3668 } 3669 3670 if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && 3671 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3672 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3673 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 3674 spec->flow_context.flow_tag, flow_attr->type); 3675 err = -EINVAL; 3676 goto free; 3677 } 3678 handler->rule = mlx5_add_flow_rules(ft, spec, 3679 &flow_act, 3680 rule_dst, dest_num); 3681 3682 if (IS_ERR(handler->rule)) { 3683 err = PTR_ERR(handler->rule); 3684 goto free; 3685 } 3686 3687 ft_prio->refcount++; 3688 handler->prio = ft_prio; 3689 handler->dev = dev; 3690 3691 ft_prio->flow_table = ft; 3692 free: 3693 if (err && handler) { 3694 if (handler->ibcounters && 3695 atomic_read(&handler->ibcounters->usecnt) == 1) 3696 counters_clear_description(handler->ibcounters); 3697 kfree(handler); 3698 } 3699 kvfree(spec); 3700 return err ? 
ERR_PTR(err) : handler; 3701 } 3702 3703 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 3704 struct mlx5_ib_flow_prio *ft_prio, 3705 const struct ib_flow_attr *flow_attr, 3706 struct mlx5_flow_destination *dst) 3707 { 3708 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 3709 } 3710 3711 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 3712 struct mlx5_ib_flow_prio *ft_prio, 3713 struct ib_flow_attr *flow_attr, 3714 struct mlx5_flow_destination *dst) 3715 { 3716 struct mlx5_ib_flow_handler *handler_dst = NULL; 3717 struct mlx5_ib_flow_handler *handler = NULL; 3718 3719 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 3720 if (!IS_ERR(handler)) { 3721 handler_dst = create_flow_rule(dev, ft_prio, 3722 flow_attr, dst); 3723 if (IS_ERR(handler_dst)) { 3724 mlx5_del_flow_rules(handler->rule); 3725 ft_prio->refcount--; 3726 kfree(handler); 3727 handler = handler_dst; 3728 } else { 3729 list_add(&handler_dst->list, &handler->list); 3730 } 3731 } 3732 3733 return handler; 3734 } 3735 enum { 3736 LEFTOVERS_MC, 3737 LEFTOVERS_UC, 3738 }; 3739 3740 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 3741 struct mlx5_ib_flow_prio *ft_prio, 3742 struct ib_flow_attr *flow_attr, 3743 struct mlx5_flow_destination *dst) 3744 { 3745 struct mlx5_ib_flow_handler *handler_ucast = NULL; 3746 struct mlx5_ib_flow_handler *handler = NULL; 3747 3748 static struct { 3749 struct ib_flow_attr flow_attr; 3750 struct ib_flow_spec_eth eth_flow; 3751 } leftovers_specs[] = { 3752 [LEFTOVERS_MC] = { 3753 .flow_attr = { 3754 .num_of_specs = 1, 3755 .size = sizeof(leftovers_specs[0]) 3756 }, 3757 .eth_flow = { 3758 .type = IB_FLOW_SPEC_ETH, 3759 .size = sizeof(struct ib_flow_spec_eth), 3760 .mask = {.dst_mac = {0x1} }, 3761 .val = {.dst_mac = {0x1} } 3762 } 3763 }, 3764 [LEFTOVERS_UC] = { 3765 .flow_attr = { 3766 .num_of_specs = 1, 3767 .size = sizeof(leftovers_specs[0]) 3768 }, 3769 .eth_flow = { 3770 .type = IB_FLOW_SPEC_ETH, 3771 .size = sizeof(struct ib_flow_spec_eth), 3772 .mask = {.dst_mac = {0x1} }, 3773 .val = {.dst_mac = {} } 3774 } 3775 } 3776 }; 3777 3778 handler = create_flow_rule(dev, ft_prio, 3779 &leftovers_specs[LEFTOVERS_MC].flow_attr, 3780 dst); 3781 if (!IS_ERR(handler) && 3782 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 3783 handler_ucast = create_flow_rule(dev, ft_prio, 3784 &leftovers_specs[LEFTOVERS_UC].flow_attr, 3785 dst); 3786 if (IS_ERR(handler_ucast)) { 3787 mlx5_del_flow_rules(handler->rule); 3788 ft_prio->refcount--; 3789 kfree(handler); 3790 handler = handler_ucast; 3791 } else { 3792 list_add(&handler_ucast->list, &handler->list); 3793 } 3794 } 3795 3796 return handler; 3797 } 3798 3799 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 3800 struct mlx5_ib_flow_prio *ft_rx, 3801 struct mlx5_ib_flow_prio *ft_tx, 3802 struct mlx5_flow_destination *dst) 3803 { 3804 struct mlx5_ib_flow_handler *handler_rx; 3805 struct mlx5_ib_flow_handler *handler_tx; 3806 int err; 3807 static const struct ib_flow_attr flow_attr = { 3808 .num_of_specs = 0, 3809 .size = sizeof(flow_attr) 3810 }; 3811 3812 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 3813 if (IS_ERR(handler_rx)) { 3814 err = PTR_ERR(handler_rx); 3815 goto err; 3816 } 3817 3818 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 3819 if (IS_ERR(handler_tx)) { 3820 err = PTR_ERR(handler_tx); 3821 goto err_tx; 3822 } 3823 3824 list_add(&handler_tx->list, &handler_rx->list); 3825 
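	/*
	 * Linking the TX handler onto the RX handler's list means a single
	 * destroy of the returned handler (see mlx5_ib_destroy_flow) tears
	 * down both sniffer rules.
	 */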
3826 return handler_rx; 3827 3828 err_tx: 3829 mlx5_del_flow_rules(handler_rx->rule); 3830 ft_rx->refcount--; 3831 kfree(handler_rx); 3832 err: 3833 return ERR_PTR(err); 3834 } 3835 3836 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 3837 struct ib_flow_attr *flow_attr, 3838 int domain, 3839 struct ib_udata *udata) 3840 { 3841 struct mlx5_ib_dev *dev = to_mdev(qp->device); 3842 struct mlx5_ib_qp *mqp = to_mqp(qp); 3843 struct mlx5_ib_flow_handler *handler = NULL; 3844 struct mlx5_flow_destination *dst = NULL; 3845 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 3846 struct mlx5_ib_flow_prio *ft_prio; 3847 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3848 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 3849 size_t min_ucmd_sz, required_ucmd_sz; 3850 int err; 3851 int underlay_qpn; 3852 3853 if (udata && udata->inlen) { 3854 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) + 3855 sizeof(ucmd_hdr.reserved); 3856 if (udata->inlen < min_ucmd_sz) 3857 return ERR_PTR(-EOPNOTSUPP); 3858 3859 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 3860 if (err) 3861 return ERR_PTR(err); 3862 3863 /* currently supports only one counters data */ 3864 if (ucmd_hdr.ncounters_data > 1) 3865 return ERR_PTR(-EINVAL); 3866 3867 required_ucmd_sz = min_ucmd_sz + 3868 sizeof(struct mlx5_ib_flow_counters_data) * 3869 ucmd_hdr.ncounters_data; 3870 if (udata->inlen > required_ucmd_sz && 3871 !ib_is_udata_cleared(udata, required_ucmd_sz, 3872 udata->inlen - required_ucmd_sz)) 3873 return ERR_PTR(-EOPNOTSUPP); 3874 3875 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 3876 if (!ucmd) 3877 return ERR_PTR(-ENOMEM); 3878 3879 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 3880 if (err) 3881 goto free_ucmd; 3882 } 3883 3884 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 3885 err = -ENOMEM; 3886 goto free_ucmd; 3887 } 3888 3889 if (domain != IB_FLOW_DOMAIN_USER || 3890 flow_attr->port > dev->num_ports || 3891 (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | 3892 IB_FLOW_ATTR_FLAGS_EGRESS))) { 3893 err = -EINVAL; 3894 goto free_ucmd; 3895 } 3896 3897 if (is_egress && 3898 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3899 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3900 err = -EINVAL; 3901 goto free_ucmd; 3902 } 3903 3904 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 3905 if (!dst) { 3906 err = -ENOMEM; 3907 goto free_ucmd; 3908 } 3909 3910 mutex_lock(&dev->flow_db->lock); 3911 3912 ft_prio = get_flow_table(dev, flow_attr, 3913 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 3914 if (IS_ERR(ft_prio)) { 3915 err = PTR_ERR(ft_prio); 3916 goto unlock; 3917 } 3918 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3919 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 3920 if (IS_ERR(ft_prio_tx)) { 3921 err = PTR_ERR(ft_prio_tx); 3922 ft_prio_tx = NULL; 3923 goto destroy_ft; 3924 } 3925 } 3926 3927 if (is_egress) { 3928 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 3929 } else { 3930 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 3931 if (mqp->flags & MLX5_IB_QP_RSS) 3932 dst->tir_num = mqp->rss_qp.tirn; 3933 else 3934 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 3935 } 3936 3937 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3938 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 3939 handler = create_dont_trap_rule(dev, ft_prio, 3940 flow_attr, dst); 3941 } else { 3942 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? 
3943 mqp->underlay_qpn : 0; 3944 handler = _create_flow_rule(dev, ft_prio, flow_attr, 3945 dst, underlay_qpn, ucmd); 3946 } 3947 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3948 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3949 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 3950 dst); 3951 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3952 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 3953 } else { 3954 err = -EINVAL; 3955 goto destroy_ft; 3956 } 3957 3958 if (IS_ERR(handler)) { 3959 err = PTR_ERR(handler); 3960 handler = NULL; 3961 goto destroy_ft; 3962 } 3963 3964 mutex_unlock(&dev->flow_db->lock); 3965 kfree(dst); 3966 kfree(ucmd); 3967 3968 return &handler->ibflow; 3969 3970 destroy_ft: 3971 put_flow_table(dev, ft_prio, false); 3972 if (ft_prio_tx) 3973 put_flow_table(dev, ft_prio_tx, false); 3974 unlock: 3975 mutex_unlock(&dev->flow_db->lock); 3976 kfree(dst); 3977 free_ucmd: 3978 kfree(ucmd); 3979 return ERR_PTR(err); 3980 } 3981 3982 static struct mlx5_ib_flow_prio * 3983 _get_flow_table(struct mlx5_ib_dev *dev, 3984 struct mlx5_ib_flow_matcher *fs_matcher, 3985 bool mcast) 3986 { 3987 struct mlx5_flow_namespace *ns = NULL; 3988 struct mlx5_ib_flow_prio *prio = NULL; 3989 int max_table_size = 0; 3990 bool esw_encap; 3991 u32 flags = 0; 3992 int priority; 3993 3994 if (mcast) 3995 priority = MLX5_IB_FLOW_MCAST_PRIO; 3996 else 3997 priority = ib_prio_to_core_prio(fs_matcher->priority, false); 3998 3999 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 4000 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 4001 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 4002 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 4003 log_max_ft_size)); 4004 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) 4005 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 4006 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 4007 reformat_l3_tunnel_to_l2) && 4008 !esw_encap) 4009 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 4010 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) { 4011 max_table_size = BIT( 4012 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); 4013 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap) 4014 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 4015 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) { 4016 max_table_size = BIT( 4017 MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); 4018 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) 4019 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 4020 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) && 4021 esw_encap) 4022 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 4023 priority = FDB_BYPASS_PATH; 4024 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) { 4025 max_table_size = 4026 BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, 4027 log_max_ft_size)); 4028 priority = fs_matcher->priority; 4029 } 4030 4031 max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); 4032 4033 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); 4034 if (!ns) 4035 return ERR_PTR(-ENOTSUPP); 4036 4037 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) 4038 prio = &dev->flow_db->prios[priority]; 4039 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) 4040 prio = &dev->flow_db->egress_prios[priority]; 4041 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) 4042 prio = &dev->flow_db->fdb; 4043 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) 4044 prio = &dev->flow_db->rdma_rx[priority]; 4045 4046 if 
(!prio) 4047 return ERR_PTR(-EINVAL); 4048 4049 if (prio->flow_table) 4050 return prio; 4051 4052 return _get_prio(ns, prio, priority, max_table_size, 4053 MLX5_FS_MAX_TYPES, flags); 4054 } 4055 4056 static struct mlx5_ib_flow_handler * 4057 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 4058 struct mlx5_ib_flow_prio *ft_prio, 4059 struct mlx5_flow_destination *dst, 4060 struct mlx5_ib_flow_matcher *fs_matcher, 4061 struct mlx5_flow_context *flow_context, 4062 struct mlx5_flow_act *flow_act, 4063 void *cmd_in, int inlen, 4064 int dst_num) 4065 { 4066 struct mlx5_ib_flow_handler *handler; 4067 struct mlx5_flow_spec *spec; 4068 struct mlx5_flow_table *ft = ft_prio->flow_table; 4069 int err = 0; 4070 4071 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 4072 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 4073 if (!handler || !spec) { 4074 err = -ENOMEM; 4075 goto free; 4076 } 4077 4078 INIT_LIST_HEAD(&handler->list); 4079 4080 memcpy(spec->match_value, cmd_in, inlen); 4081 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 4082 fs_matcher->mask_len); 4083 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 4084 spec->flow_context = *flow_context; 4085 4086 handler->rule = mlx5_add_flow_rules(ft, spec, 4087 flow_act, dst, dst_num); 4088 4089 if (IS_ERR(handler->rule)) { 4090 err = PTR_ERR(handler->rule); 4091 goto free; 4092 } 4093 4094 ft_prio->refcount++; 4095 handler->prio = ft_prio; 4096 handler->dev = dev; 4097 ft_prio->flow_table = ft; 4098 4099 free: 4100 if (err) 4101 kfree(handler); 4102 kvfree(spec); 4103 return err ? ERR_PTR(err) : handler; 4104 } 4105 4106 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 4107 void *match_v) 4108 { 4109 void *match_c; 4110 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 4111 void *dmac, *dmac_mask; 4112 void *ipv4, *ipv4_mask; 4113 4114 if (!(fs_matcher->match_criteria_enable & 4115 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 4116 return false; 4117 4118 match_c = fs_matcher->matcher_mask.match_params; 4119 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 4120 outer_headers); 4121 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 4122 outer_headers); 4123 4124 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4125 dmac_47_16); 4126 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4127 dmac_47_16); 4128 4129 if (is_multicast_ether_addr(dmac) && 4130 is_multicast_ether_addr(dmac_mask)) 4131 return true; 4132 4133 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4134 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4135 4136 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4137 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4138 4139 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 4140 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 4141 return true; 4142 4143 return false; 4144 } 4145 4146 struct mlx5_ib_flow_handler * 4147 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 4148 struct mlx5_ib_flow_matcher *fs_matcher, 4149 struct mlx5_flow_context *flow_context, 4150 struct mlx5_flow_act *flow_act, 4151 u32 counter_id, 4152 void *cmd_in, int inlen, int dest_id, 4153 int dest_type) 4154 { 4155 struct mlx5_flow_destination *dst; 4156 struct mlx5_ib_flow_prio *ft_prio; 4157 struct mlx5_ib_flow_handler *handler; 4158 int dst_num = 0; 4159 bool mcast; 4160 int err; 4161 4162 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 4163 return ERR_PTR(-EOPNOTSUPP); 4164 4165 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 4166 return 
ERR_PTR(-ENOMEM); 4167 4168 dst = kcalloc(2, sizeof(*dst), GFP_KERNEL); 4169 if (!dst) 4170 return ERR_PTR(-ENOMEM); 4171 4172 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 4173 mutex_lock(&dev->flow_db->lock); 4174 4175 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 4176 if (IS_ERR(ft_prio)) { 4177 err = PTR_ERR(ft_prio); 4178 goto unlock; 4179 } 4180 4181 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { 4182 dst[dst_num].type = dest_type; 4183 dst[dst_num].tir_num = dest_id; 4184 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4185 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { 4186 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 4187 dst[dst_num].ft_num = dest_id; 4188 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4189 } else { 4190 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT; 4191 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 4192 } 4193 4194 dst_num++; 4195 4196 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 4197 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 4198 dst[dst_num].counter_id = counter_id; 4199 dst_num++; 4200 } 4201 4202 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, 4203 flow_context, flow_act, 4204 cmd_in, inlen, dst_num); 4205 4206 if (IS_ERR(handler)) { 4207 err = PTR_ERR(handler); 4208 goto destroy_ft; 4209 } 4210 4211 mutex_unlock(&dev->flow_db->lock); 4212 atomic_inc(&fs_matcher->usecnt); 4213 handler->flow_matcher = fs_matcher; 4214 4215 kfree(dst); 4216 4217 return handler; 4218 4219 destroy_ft: 4220 put_flow_table(dev, ft_prio, false); 4221 unlock: 4222 mutex_unlock(&dev->flow_db->lock); 4223 kfree(dst); 4224 4225 return ERR_PTR(err); 4226 } 4227 4228 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 4229 { 4230 u32 flags = 0; 4231 4232 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 4233 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 4234 4235 return flags; 4236 } 4237 4238 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 4239 static struct ib_flow_action * 4240 mlx5_ib_create_flow_action_esp(struct ib_device *device, 4241 const struct ib_flow_action_attrs_esp *attr, 4242 struct uverbs_attr_bundle *attrs) 4243 { 4244 struct mlx5_ib_dev *mdev = to_mdev(device); 4245 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 4246 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 4247 struct mlx5_ib_flow_action *action; 4248 u64 action_flags; 4249 u64 flags; 4250 int err = 0; 4251 4252 err = uverbs_get_flags64( 4253 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 4254 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 4255 if (err) 4256 return ERR_PTR(err); 4257 4258 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 4259 4260 /* We current only support a subset of the standard features. Only a 4261 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 4262 * (with overlap). Full offload mode isn't supported. 
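* Anything else in the request (replay protection, encapsulation, SPI, sequence number, TFC padding, hard packet limits, or a keymat other than AES-GCM with a 16-byte ICV and sequence-based IV) is rejected below with -EOPNOTSUPP.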
4263 */ 4264 if (!attr->keymat || attr->replay || attr->encap || 4265 attr->spi || attr->seq || attr->tfc_pad || 4266 attr->hard_limit_pkts || 4267 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4268 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 4269 return ERR_PTR(-EOPNOTSUPP); 4270 4271 if (attr->keymat->protocol != 4272 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 4273 return ERR_PTR(-EOPNOTSUPP); 4274 4275 aes_gcm = &attr->keymat->keymat.aes_gcm; 4276 4277 if (aes_gcm->icv_len != 16 || 4278 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 4279 return ERR_PTR(-EOPNOTSUPP); 4280 4281 action = kmalloc(sizeof(*action), GFP_KERNEL); 4282 if (!action) 4283 return ERR_PTR(-ENOMEM); 4284 4285 action->esp_aes_gcm.ib_flags = attr->flags; 4286 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 4287 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 4288 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 4289 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 4290 sizeof(accel_attrs.keymat.aes_gcm.salt)); 4291 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 4292 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 4293 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 4294 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 4295 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 4296 4297 accel_attrs.esn = attr->esn; 4298 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 4299 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 4300 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 4301 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4302 4303 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 4304 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 4305 4306 action->esp_aes_gcm.ctx = 4307 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 4308 if (IS_ERR(action->esp_aes_gcm.ctx)) { 4309 err = PTR_ERR(action->esp_aes_gcm.ctx); 4310 goto err_parse; 4311 } 4312 4313 action->esp_aes_gcm.ib_flags = attr->flags; 4314 4315 return &action->ib_action; 4316 4317 err_parse: 4318 kfree(action); 4319 return ERR_PTR(err); 4320 } 4321 4322 static int 4323 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 4324 const struct ib_flow_action_attrs_esp *attr, 4325 struct uverbs_attr_bundle *attrs) 4326 { 4327 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 4328 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 4329 int err = 0; 4330 4331 if (attr->keymat || attr->replay || attr->encap || 4332 attr->spi || attr->seq || attr->tfc_pad || 4333 attr->hard_limit_pkts || 4334 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4335 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 4336 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 4337 return -EOPNOTSUPP; 4338 4339 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 4340 * be modified. 
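* In other words, modify only refreshes the ESN state of the existing xfrm context; every other attribute must be chosen at create time.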
4341 */ 4342 if (!(maction->esp_aes_gcm.ib_flags & 4343 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 4344 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4345 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 4346 return -EINVAL; 4347 4348 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 4349 sizeof(accel_attrs)); 4350 4351 accel_attrs.esn = attr->esn; 4352 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 4353 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4354 else 4355 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4356 4357 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 4358 &accel_attrs); 4359 if (err) 4360 return err; 4361 4362 maction->esp_aes_gcm.ib_flags &= 4363 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 4364 maction->esp_aes_gcm.ib_flags |= 4365 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 4366 4367 return 0; 4368 } 4369 4370 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) 4371 { 4372 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 4373 4374 switch (action->type) { 4375 case IB_FLOW_ACTION_ESP: 4376 /* 4377 * We only support aes_gcm for now, so we implicitly know this is 4378 * the underlying crypto. 4379 */ 4380 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 4381 break; 4382 case IB_FLOW_ACTION_UNSPECIFIED: 4383 mlx5_ib_destroy_flow_action_raw(maction); 4384 break; 4385 default: 4386 WARN_ON(true); 4387 break; 4388 } 4389 4390 kfree(maction); 4391 return 0; 4392 } 4393 4394 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4395 { 4396 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4397 struct mlx5_ib_qp *mqp = to_mqp(ibqp); 4398 int err; 4399 u16 uid; 4400 4401 uid = ibqp->pd ? 4402 to_mpd(ibqp->pd)->uid : 0; 4403 4404 if (mqp->flags & MLX5_IB_QP_UNDERLAY) { 4405 mlx5_ib_dbg(dev, "Attaching a multicast group to underlay QP is not supported\n"); 4406 return -EOPNOTSUPP; 4407 } 4408 4409 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid); 4410 if (err) 4411 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 4412 ibqp->qp_num, gid->raw); 4413 4414 return err; 4415 } 4416 4417 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4418 { 4419 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4420 int err; 4421 u16 uid; 4422 4423 uid = ibqp->pd ?
4424 to_mpd(ibqp->pd)->uid : 0; 4425 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid); 4426 if (err) 4427 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 4428 ibqp->qp_num, gid->raw); 4429 4430 return err; 4431 } 4432 4433 static int init_node_data(struct mlx5_ib_dev *dev) 4434 { 4435 int err; 4436 4437 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 4438 if (err) 4439 return err; 4440 4441 dev->mdev->rev_id = dev->mdev->pdev->revision; 4442 4443 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 4444 } 4445 4446 static ssize_t fw_pages_show(struct device *device, 4447 struct device_attribute *attr, char *buf) 4448 { 4449 struct mlx5_ib_dev *dev = 4450 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4451 4452 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); 4453 } 4454 static DEVICE_ATTR_RO(fw_pages); 4455 4456 static ssize_t reg_pages_show(struct device *device, 4457 struct device_attribute *attr, char *buf) 4458 { 4459 struct mlx5_ib_dev *dev = 4460 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4461 4462 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 4463 } 4464 static DEVICE_ATTR_RO(reg_pages); 4465 4466 static ssize_t hca_type_show(struct device *device, 4467 struct device_attribute *attr, char *buf) 4468 { 4469 struct mlx5_ib_dev *dev = 4470 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4471 4472 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 4473 } 4474 static DEVICE_ATTR_RO(hca_type); 4475 4476 static ssize_t hw_rev_show(struct device *device, 4477 struct device_attribute *attr, char *buf) 4478 { 4479 struct mlx5_ib_dev *dev = 4480 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4481 4482 return sprintf(buf, "%x\n", dev->mdev->rev_id); 4483 } 4484 static DEVICE_ATTR_RO(hw_rev); 4485 4486 static ssize_t board_id_show(struct device *device, 4487 struct device_attribute *attr, char *buf) 4488 { 4489 struct mlx5_ib_dev *dev = 4490 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4491 4492 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 4493 dev->mdev->board_id); 4494 } 4495 static DEVICE_ATTR_RO(board_id); 4496 4497 static struct attribute *mlx5_class_attributes[] = { 4498 &dev_attr_hw_rev.attr, 4499 &dev_attr_hca_type.attr, 4500 &dev_attr_board_id.attr, 4501 &dev_attr_fw_pages.attr, 4502 &dev_attr_reg_pages.attr, 4503 NULL, 4504 }; 4505 4506 static const struct attribute_group mlx5_attr_group = { 4507 .attrs = mlx5_class_attributes, 4508 }; 4509 4510 static void pkey_change_handler(struct work_struct *work) 4511 { 4512 struct mlx5_ib_port_resources *ports = 4513 container_of(work, struct mlx5_ib_port_resources, 4514 pkey_change_work); 4515 4516 mutex_lock(&ports->devr->mutex); 4517 mlx5_ib_gsi_pkey_change(ports->gsi); 4518 mutex_unlock(&ports->devr->mutex); 4519 } 4520 4521 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 4522 { 4523 struct mlx5_ib_qp *mqp; 4524 struct mlx5_ib_cq *send_mcq, *recv_mcq; 4525 struct mlx5_core_cq *mcq; 4526 struct list_head cq_armed_list; 4527 unsigned long flags_qp; 4528 unsigned long flags_cq; 4529 unsigned long flags; 4530 4531 INIT_LIST_HEAD(&cq_armed_list); 4532 4533 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 4534 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 4535 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 4536 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 4537 if (mqp->sq.tail != mqp->sq.head) { 4538 send_mcq = 
to_mcq(mqp->ibqp.send_cq); 4539 spin_lock_irqsave(&send_mcq->lock, flags_cq); 4540 if (send_mcq->mcq.comp && 4541 mqp->ibqp.send_cq->comp_handler) { 4542 if (!send_mcq->mcq.reset_notify_added) { 4543 send_mcq->mcq.reset_notify_added = 1; 4544 list_add_tail(&send_mcq->mcq.reset_notify, 4545 &cq_armed_list); 4546 } 4547 } 4548 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 4549 } 4550 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 4551 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 4552 /* no handling is needed for SRQ */ 4553 if (!mqp->ibqp.srq) { 4554 if (mqp->rq.tail != mqp->rq.head) { 4555 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 4556 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 4557 if (recv_mcq->mcq.comp && 4558 mqp->ibqp.recv_cq->comp_handler) { 4559 if (!recv_mcq->mcq.reset_notify_added) { 4560 recv_mcq->mcq.reset_notify_added = 1; 4561 list_add_tail(&recv_mcq->mcq.reset_notify, 4562 &cq_armed_list); 4563 } 4564 } 4565 spin_unlock_irqrestore(&recv_mcq->lock, 4566 flags_cq); 4567 } 4568 } 4569 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 4570 } 4571 /* At this point, the lock/unlock cycles above guarantee that all in-flight 4572 * post_send operations have been flushed; now arm all involved CQs. 4573 */ 4574 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 4575 mcq->comp(mcq, NULL); 4576 } 4577 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 4578 } 4579 4580 static void delay_drop_handler(struct work_struct *work) 4581 { 4582 int err; 4583 struct mlx5_ib_delay_drop *delay_drop = 4584 container_of(work, struct mlx5_ib_delay_drop, 4585 delay_drop_work); 4586 4587 atomic_inc(&delay_drop->events_cnt); 4588 4589 mutex_lock(&delay_drop->lock); 4590 err = mlx5_core_set_delay_drop(delay_drop->dev->mdev, 4591 delay_drop->timeout); 4592 if (err) { 4593 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n", 4594 delay_drop->timeout); 4595 delay_drop->activate = false; 4596 } 4597 mutex_unlock(&delay_drop->lock); 4598 } 4599 4600 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 4601 struct ib_event *ibev) 4602 { 4603 u8 port = (eqe->data.port.port >> 4) & 0xf; 4604 4605 switch (eqe->sub_type) { 4606 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: 4607 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4608 IB_LINK_LAYER_ETHERNET) 4609 schedule_work(&ibdev->delay_drop.delay_drop_work); 4610 break; 4611 default: /* do nothing */ 4612 return; 4613 } 4614 } 4615 4616 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 4617 struct ib_event *ibev) 4618 { 4619 u8 port = (eqe->data.port.port >> 4) & 0xf; 4620 4621 ibev->element.port_num = port; 4622 4623 switch (eqe->sub_type) { 4624 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: 4625 case MLX5_PORT_CHANGE_SUBTYPE_DOWN: 4626 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: 4627 /* In RoCE, port up/down events are handled in 4628 * mlx5_netdev_event(). 4629 */ 4630 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4631 IB_LINK_LAYER_ETHERNET) 4632 return -EINVAL; 4633 4634 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4635 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 4636 break; 4637 4638 case MLX5_PORT_CHANGE_SUBTYPE_LID: 4639 ibev->event = IB_EVENT_LID_CHANGE; 4640 break; 4641 4642 case MLX5_PORT_CHANGE_SUBTYPE_PKEY: 4643 ibev->event = IB_EVENT_PKEY_CHANGE; 4644 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 4645 break; 4646 4647 case MLX5_PORT_CHANGE_SUBTYPE_GUID: 4648 ibev->event = IB_EVENT_GID_CHANGE; 4649 break; 4650 4651 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: 4652 ibev->event = IB_EVENT_CLIENT_REREGISTER; 4653 break; 4654 default: 4655 return -EINVAL; 4656 } 4657 4658 return 0; 4659 } 4660 4661 static void mlx5_ib_handle_event(struct work_struct *_work) 4662 { 4663 struct mlx5_ib_event_work *work = 4664 container_of(_work, struct mlx5_ib_event_work, work); 4665 struct mlx5_ib_dev *ibdev; 4666 struct ib_event ibev; 4667 bool fatal = false; 4668 4669 if (work->is_slave) { 4670 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); 4671 if (!ibdev) 4672 goto out; 4673 } else { 4674 ibdev = work->dev; 4675 } 4676 4677 switch (work->event) { 4678 case MLX5_DEV_EVENT_SYS_ERROR: 4679 ibev.event = IB_EVENT_DEVICE_FATAL; 4680 mlx5_ib_handle_internal_error(ibdev); 4681 ibev.element.port_num = (u8)(unsigned long)work->param; 4682 fatal = true; 4683 break; 4684 case MLX5_EVENT_TYPE_PORT_CHANGE: 4685 if (handle_port_change(ibdev, work->param, &ibev)) 4686 goto out; 4687 break; 4688 case MLX5_EVENT_TYPE_GENERAL_EVENT: 4689 handle_general_event(ibdev, work->param, &ibev); 4690 /* fall through */ 4691 default: 4692 goto out; 4693 } 4694 4695 ibev.device = &ibdev->ib_dev; 4696 4697 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { 4698 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); 4699 goto out; 4700 } 4701 4702 if (ibdev->ib_active) 4703 ib_dispatch_event(&ibev); 4704 4705 if (fatal) 4706 ibdev->ib_active = false; 4707 out: 4708 kfree(work); 4709 } 4710 4711 static int mlx5_ib_event(struct notifier_block *nb, 4712 unsigned long event, void *param) 4713 { 4714 struct mlx5_ib_event_work *work; 4715 4716 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4717 if (!work) 4718 return NOTIFY_DONE; 4719 4720 INIT_WORK(&work->work, mlx5_ib_handle_event); 4721 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); 4722 work->is_slave = false; 4723 work->param = param; 4724 work->event = event; 4725 4726 queue_work(mlx5_ib_event_wq, &work->work); 4727 4728 return NOTIFY_OK; 4729 } 4730 4731 static int mlx5_ib_event_slave_port(struct notifier_block *nb, 4732 unsigned long event, void *param) 4733 { 4734 struct mlx5_ib_event_work *work; 4735 4736 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4737 if (!work) 4738 return NOTIFY_DONE; 4739 4740 INIT_WORK(&work->work, mlx5_ib_handle_event); 4741 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); 4742 work->is_slave = true; 4743 work->param = param; 4744 work->event = event; 4745 queue_work(mlx5_ib_event_wq, &work->work); 4746 4747 return NOTIFY_OK; 4748 } 4749 4750 static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4751 { 4752 struct mlx5_hca_vport_context vport_ctx; 4753 int err; 4754 int port; 4755 4756 for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) { 4757 dev->mdev->port_caps[port - 1].has_smi = false; 4758 if (MLX5_CAP_GEN(dev->mdev, port_type) == 4759 MLX5_CAP_PORT_TYPE_IB) { 4760 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { 4761 err = mlx5_query_hca_vport_context(dev->mdev, 0, 4762 port, 0, 4763 &vport_ctx); 4764 if (err) { 4765 mlx5_ib_err(dev, "query_hca_vport_context for port=%d 
failed %d\n", 4766 port, err); 4767 return err; 4768 } 4769 dev->mdev->port_caps[port - 1].has_smi = 4770 vport_ctx.has_smi; 4771 } else { 4772 dev->mdev->port_caps[port - 1].has_smi = true; 4773 } 4774 } 4775 } 4776 return 0; 4777 } 4778 4779 static void get_ext_port_caps(struct mlx5_ib_dev *dev) 4780 { 4781 int port; 4782 4783 for (port = 1; port <= dev->num_ports; port++) 4784 mlx5_query_ext_port_caps(dev, port); 4785 } 4786 4787 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4788 { 4789 struct ib_device_attr *dprops = NULL; 4790 struct ib_port_attr *pprops = NULL; 4791 int err = -ENOMEM; 4792 4793 pprops = kzalloc(sizeof(*pprops), GFP_KERNEL); 4794 if (!pprops) 4795 goto out; 4796 4797 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 4798 if (!dprops) 4799 goto out; 4800 4801 err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL); 4802 if (err) { 4803 mlx5_ib_warn(dev, "query_device failed %d\n", err); 4804 goto out; 4805 } 4806 4807 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 4808 if (err) { 4809 mlx5_ib_warn(dev, "query_port %d failed %d\n", 4810 port, err); 4811 goto out; 4812 } 4813 4814 dev->mdev->port_caps[port - 1].pkey_table_len = 4815 dprops->max_pkeys; 4816 dev->mdev->port_caps[port - 1].gid_table_len = 4817 pprops->gid_tbl_len; 4818 mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n", 4819 port, dprops->max_pkeys, pprops->gid_tbl_len); 4820 4821 out: 4822 kfree(pprops); 4823 kfree(dprops); 4824 4825 return err; 4826 } 4827 4828 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4829 { 4830 /* For representors use port 1, is this is the only native 4831 * port 4832 */ 4833 if (dev->is_rep) 4834 return __get_port_caps(dev, 1); 4835 return __get_port_caps(dev, port); 4836 } 4837 4838 static void destroy_umrc_res(struct mlx5_ib_dev *dev) 4839 { 4840 int err; 4841 4842 err = mlx5_mr_cache_cleanup(dev); 4843 if (err) 4844 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 4845 4846 if (dev->umrc.qp) 4847 mlx5_ib_destroy_qp(dev->umrc.qp, NULL); 4848 if (dev->umrc.cq) 4849 ib_free_cq(dev->umrc.cq); 4850 if (dev->umrc.pd) 4851 ib_dealloc_pd(dev->umrc.pd); 4852 } 4853 4854 enum { 4855 MAX_UMR_WR = 128, 4856 }; 4857 4858 static int create_umr_res(struct mlx5_ib_dev *dev) 4859 { 4860 struct ib_qp_init_attr *init_attr = NULL; 4861 struct ib_qp_attr *attr = NULL; 4862 struct ib_pd *pd; 4863 struct ib_cq *cq; 4864 struct ib_qp *qp; 4865 int ret; 4866 4867 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 4868 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 4869 if (!attr || !init_attr) { 4870 ret = -ENOMEM; 4871 goto error_0; 4872 } 4873 4874 pd = ib_alloc_pd(&dev->ib_dev, 0); 4875 if (IS_ERR(pd)) { 4876 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 4877 ret = PTR_ERR(pd); 4878 goto error_0; 4879 } 4880 4881 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 4882 if (IS_ERR(cq)) { 4883 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 4884 ret = PTR_ERR(cq); 4885 goto error_2; 4886 } 4887 4888 init_attr->send_cq = cq; 4889 init_attr->recv_cq = cq; 4890 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 4891 init_attr->cap.max_send_wr = MAX_UMR_WR; 4892 init_attr->cap.max_send_sge = 1; 4893 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 4894 init_attr->port_num = 1; 4895 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 4896 if (IS_ERR(qp)) { 4897 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 4898 ret = PTR_ERR(qp); 4899 goto error_3; 4900 } 4901 qp->device = &dev->ib_dev; 4902 qp->real_qp = qp; 4903 qp->uobject = NULL; 4904 
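/* The UMR QP is created through the driver entry point rather than ib_create_qp(), so the ib_qp core fields are filled in by hand here before the QP is walked through INIT, RTR and RTS below. */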
qp->qp_type = MLX5_IB_QPT_REG_UMR; 4905 qp->send_cq = init_attr->send_cq; 4906 qp->recv_cq = init_attr->recv_cq; 4907 4908 attr->qp_state = IB_QPS_INIT; 4909 attr->port_num = 1; 4910 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 4911 IB_QP_PORT, NULL); 4912 if (ret) { 4913 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 4914 goto error_4; 4915 } 4916 4917 memset(attr, 0, sizeof(*attr)); 4918 attr->qp_state = IB_QPS_RTR; 4919 attr->path_mtu = IB_MTU_256; 4920 4921 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4922 if (ret) { 4923 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 4924 goto error_4; 4925 } 4926 4927 memset(attr, 0, sizeof(*attr)); 4928 attr->qp_state = IB_QPS_RTS; 4929 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4930 if (ret) { 4931 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 4932 goto error_4; 4933 } 4934 4935 dev->umrc.qp = qp; 4936 dev->umrc.cq = cq; 4937 dev->umrc.pd = pd; 4938 4939 sema_init(&dev->umrc.sem, MAX_UMR_WR); 4940 ret = mlx5_mr_cache_init(dev); 4941 if (ret) { 4942 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 4943 goto error_4; 4944 } 4945 4946 kfree(attr); 4947 kfree(init_attr); 4948 4949 return 0; 4950 4951 error_4: 4952 mlx5_ib_destroy_qp(qp, NULL); 4953 dev->umrc.qp = NULL; 4954 4955 error_3: 4956 ib_free_cq(cq); 4957 dev->umrc.cq = NULL; 4958 4959 error_2: 4960 ib_dealloc_pd(pd); 4961 dev->umrc.pd = NULL; 4962 4963 error_0: 4964 kfree(attr); 4965 kfree(init_attr); 4966 return ret; 4967 } 4968 4969 static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 4970 { 4971 switch (umr_fence_cap) { 4972 case MLX5_CAP_UMR_FENCE_NONE: 4973 return MLX5_FENCE_MODE_NONE; 4974 case MLX5_CAP_UMR_FENCE_SMALL: 4975 return MLX5_FENCE_MODE_INITIATOR_SMALL; 4976 default: 4977 return MLX5_FENCE_MODE_STRONG_ORDERING; 4978 } 4979 } 4980 4981 static int create_dev_resources(struct mlx5_ib_resources *devr) 4982 { 4983 struct ib_srq_init_attr attr; 4984 struct mlx5_ib_dev *dev; 4985 struct ib_device *ibdev; 4986 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 4987 int port; 4988 int ret = 0; 4989 4990 dev = container_of(devr, struct mlx5_ib_dev, devr); 4991 ibdev = &dev->ib_dev; 4992 4993 mutex_init(&devr->mutex); 4994 4995 devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); 4996 if (!devr->p0) 4997 return -ENOMEM; 4998 4999 devr->p0->device = ibdev; 5000 devr->p0->uobject = NULL; 5001 atomic_set(&devr->p0->usecnt, 0); 5002 5003 ret = mlx5_ib_alloc_pd(devr->p0, NULL); 5004 if (ret) 5005 goto error0; 5006 5007 devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq); 5008 if (!devr->c0) { 5009 ret = -ENOMEM; 5010 goto error1; 5011 } 5012 5013 devr->c0->device = &dev->ib_dev; 5014 atomic_set(&devr->c0->usecnt, 0); 5015 5016 ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL); 5017 if (ret) 5018 goto err_create_cq; 5019 5020 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 5021 if (IS_ERR(devr->x0)) { 5022 ret = PTR_ERR(devr->x0); 5023 goto error2; 5024 } 5025 devr->x0->device = &dev->ib_dev; 5026 devr->x0->inode = NULL; 5027 atomic_set(&devr->x0->usecnt, 0); 5028 mutex_init(&devr->x0->tgt_qp_mutex); 5029 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 5030 5031 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 5032 if (IS_ERR(devr->x1)) { 5033 ret = PTR_ERR(devr->x1); 5034 goto error3; 5035 } 5036 devr->x1->device = &dev->ib_dev; 5037 devr->x1->inode = NULL; 5038 atomic_set(&devr->x1->usecnt, 0); 5039 mutex_init(&devr->x1->tgt_qp_mutex); 5040 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 5041 5042 memset(&attr, 0, sizeof(attr)); 5043 attr.attr.max_sge = 1; 5044 
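/* s0 below is a minimal XRC SRQ (one SGE, one WR) bound to the shared CQ c0 and XRCD x0; s1 further down is a basic SRQ on the same CQ. */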
attr.attr.max_wr = 1; 5045 attr.srq_type = IB_SRQT_XRC; 5046 attr.ext.cq = devr->c0; 5047 attr.ext.xrc.xrcd = devr->x0; 5048 5049 devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); 5050 if (!devr->s0) { 5051 ret = -ENOMEM; 5052 goto error4; 5053 } 5054 5055 devr->s0->device = &dev->ib_dev; 5056 devr->s0->pd = devr->p0; 5057 devr->s0->srq_type = IB_SRQT_XRC; 5058 devr->s0->ext.xrc.xrcd = devr->x0; 5059 devr->s0->ext.cq = devr->c0; 5060 ret = mlx5_ib_create_srq(devr->s0, &attr, NULL); 5061 if (ret) 5062 goto err_create; 5063 5064 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 5065 atomic_inc(&devr->s0->ext.cq->usecnt); 5066 atomic_inc(&devr->p0->usecnt); 5067 atomic_set(&devr->s0->usecnt, 0); 5068 5069 memset(&attr, 0, sizeof(attr)); 5070 attr.attr.max_sge = 1; 5071 attr.attr.max_wr = 1; 5072 attr.srq_type = IB_SRQT_BASIC; 5073 devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq); 5074 if (!devr->s1) { 5075 ret = -ENOMEM; 5076 goto error5; 5077 } 5078 5079 devr->s1->device = &dev->ib_dev; 5080 devr->s1->pd = devr->p0; 5081 devr->s1->srq_type = IB_SRQT_BASIC; 5082 devr->s1->ext.cq = devr->c0; 5083 5084 ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); 5085 if (ret) 5086 goto error6; 5087 5088 atomic_inc(&devr->p0->usecnt); 5089 atomic_set(&devr->s1->usecnt, 0); 5090 5091 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 5092 INIT_WORK(&devr->ports[port].pkey_change_work, 5093 pkey_change_handler); 5094 devr->ports[port].devr = devr; 5095 } 5096 5097 return 0; 5098 5099 error6: 5100 kfree(devr->s1); 5101 error5: 5102 mlx5_ib_destroy_srq(devr->s0, NULL); 5103 err_create: 5104 kfree(devr->s0); 5105 error4: 5106 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5107 error3: 5108 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5109 error2: 5110 mlx5_ib_destroy_cq(devr->c0, NULL); 5111 err_create_cq: 5112 kfree(devr->c0); 5113 error1: 5114 mlx5_ib_dealloc_pd(devr->p0, NULL); 5115 error0: 5116 kfree(devr->p0); 5117 return ret; 5118 } 5119 5120 static void destroy_dev_resources(struct mlx5_ib_resources *devr) 5121 { 5122 int port; 5123 5124 mlx5_ib_destroy_srq(devr->s1, NULL); 5125 kfree(devr->s1); 5126 mlx5_ib_destroy_srq(devr->s0, NULL); 5127 kfree(devr->s0); 5128 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5129 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5130 mlx5_ib_destroy_cq(devr->c0, NULL); 5131 kfree(devr->c0); 5132 mlx5_ib_dealloc_pd(devr->p0, NULL); 5133 kfree(devr->p0); 5134 5135 /* Make sure no change P_Key work items are still executing */ 5136 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) 5137 cancel_work_sync(&devr->ports[port].pkey_change_work); 5138 } 5139 5140 static u32 get_core_cap_flags(struct ib_device *ibdev, 5141 struct mlx5_hca_vport_context *rep) 5142 { 5143 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5144 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 5145 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 5146 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 5147 bool raw_support = !mlx5_core_mp_enabled(dev->mdev); 5148 u32 ret = 0; 5149 5150 if (rep->grh_required) 5151 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED; 5152 5153 if (ll == IB_LINK_LAYER_INFINIBAND) 5154 return ret | RDMA_CORE_PORT_IBA_IB; 5155 5156 if (raw_support) 5157 ret |= RDMA_CORE_PORT_RAW_PACKET; 5158 5159 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 5160 return ret; 5161 5162 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 5163 return ret; 5164 5165 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 5166 ret |= RDMA_CORE_PORT_IBA_ROCE; 5167 5168 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) 5169 ret |= 
RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 5170 5171 return ret; 5172 } 5173 5174 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 5175 struct ib_port_immutable *immutable) 5176 { 5177 struct ib_port_attr attr; 5178 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5179 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); 5180 struct mlx5_hca_vport_context rep = {0}; 5181 int err; 5182 5183 err = ib_query_port(ibdev, port_num, &attr); 5184 if (err) 5185 return err; 5186 5187 if (ll == IB_LINK_LAYER_INFINIBAND) { 5188 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0, 5189 &rep); 5190 if (err) 5191 return err; 5192 } 5193 5194 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5195 immutable->gid_tbl_len = attr.gid_tbl_len; 5196 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); 5197 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 5198 5199 return 0; 5200 } 5201 5202 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, 5203 struct ib_port_immutable *immutable) 5204 { 5205 struct ib_port_attr attr; 5206 int err; 5207 5208 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5209 5210 err = ib_query_port(ibdev, port_num, &attr); 5211 if (err) 5212 return err; 5213 5214 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5215 immutable->gid_tbl_len = attr.gid_tbl_len; 5216 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5217 5218 return 0; 5219 } 5220 5221 static void get_dev_fw_str(struct ib_device *ibdev, char *str) 5222 { 5223 struct mlx5_ib_dev *dev = 5224 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 5225 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", 5226 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), 5227 fw_rev_sub(dev->mdev)); 5228 } 5229 5230 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) 5231 { 5232 struct mlx5_core_dev *mdev = dev->mdev; 5233 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, 5234 MLX5_FLOW_NAMESPACE_LAG); 5235 struct mlx5_flow_table *ft; 5236 int err; 5237 5238 if (!ns || !mlx5_lag_is_roce(mdev)) 5239 return 0; 5240 5241 err = mlx5_cmd_create_vport_lag(mdev); 5242 if (err) 5243 return err; 5244 5245 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0); 5246 if (IS_ERR(ft)) { 5247 err = PTR_ERR(ft); 5248 goto err_destroy_vport_lag; 5249 } 5250 5251 dev->flow_db->lag_demux_ft = ft; 5252 dev->lag_active = true; 5253 return 0; 5254 5255 err_destroy_vport_lag: 5256 mlx5_cmd_destroy_vport_lag(mdev); 5257 return err; 5258 } 5259 5260 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) 5261 { 5262 struct mlx5_core_dev *mdev = dev->mdev; 5263 5264 if (dev->lag_active) { 5265 dev->lag_active = false; 5266 5267 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); 5268 dev->flow_db->lag_demux_ft = NULL; 5269 5270 mlx5_cmd_destroy_vport_lag(mdev); 5271 } 5272 } 5273 5274 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5275 { 5276 int err; 5277 5278 dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; 5279 err = register_netdevice_notifier(&dev->port[port_num].roce.nb); 5280 if (err) { 5281 dev->port[port_num].roce.nb.notifier_call = NULL; 5282 return err; 5283 } 5284 5285 return 0; 5286 } 5287 5288 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5289 { 5290 if (dev->port[port_num].roce.nb.notifier_call) { 5291 unregister_netdevice_notifier(&dev->port[port_num].roce.nb); 5292 dev->port[port_num].roce.nb.notifier_call = NULL; 5293 } 5294 } 5295 5296 static int mlx5_enable_eth(struct mlx5_ib_dev *dev) 5297 { 5298 int err; 5299 5300 err = 
mlx5_nic_vport_enable_roce(dev->mdev); 5301 if (err) 5302 return err; 5303 5304 err = mlx5_eth_lag_init(dev); 5305 if (err) 5306 goto err_disable_roce; 5307 5308 return 0; 5309 5310 err_disable_roce: 5311 mlx5_nic_vport_disable_roce(dev->mdev); 5312 5313 return err; 5314 } 5315 5316 static void mlx5_disable_eth(struct mlx5_ib_dev *dev) 5317 { 5318 mlx5_eth_lag_cleanup(dev); 5319 mlx5_nic_vport_disable_roce(dev->mdev); 5320 } 5321 5322 struct mlx5_ib_counter { 5323 const char *name; 5324 size_t offset; 5325 }; 5326 5327 #define INIT_Q_COUNTER(_name) \ 5328 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)} 5329 5330 static const struct mlx5_ib_counter basic_q_cnts[] = { 5331 INIT_Q_COUNTER(rx_write_requests), 5332 INIT_Q_COUNTER(rx_read_requests), 5333 INIT_Q_COUNTER(rx_atomic_requests), 5334 INIT_Q_COUNTER(out_of_buffer), 5335 }; 5336 5337 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = { 5338 INIT_Q_COUNTER(out_of_sequence), 5339 }; 5340 5341 static const struct mlx5_ib_counter retrans_q_cnts[] = { 5342 INIT_Q_COUNTER(duplicate_request), 5343 INIT_Q_COUNTER(rnr_nak_retry_err), 5344 INIT_Q_COUNTER(packet_seq_err), 5345 INIT_Q_COUNTER(implied_nak_seq_err), 5346 INIT_Q_COUNTER(local_ack_timeout_err), 5347 }; 5348 5349 #define INIT_CONG_COUNTER(_name) \ 5350 { .name = #_name, .offset = \ 5351 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)} 5352 5353 static const struct mlx5_ib_counter cong_cnts[] = { 5354 INIT_CONG_COUNTER(rp_cnp_ignored), 5355 INIT_CONG_COUNTER(rp_cnp_handled), 5356 INIT_CONG_COUNTER(np_ecn_marked_roce_packets), 5357 INIT_CONG_COUNTER(np_cnp_sent), 5358 }; 5359 5360 static const struct mlx5_ib_counter extended_err_cnts[] = { 5361 INIT_Q_COUNTER(resp_local_length_error), 5362 INIT_Q_COUNTER(resp_cqe_error), 5363 INIT_Q_COUNTER(req_cqe_error), 5364 INIT_Q_COUNTER(req_remote_invalid_request), 5365 INIT_Q_COUNTER(req_remote_access_errors), 5366 INIT_Q_COUNTER(resp_remote_access_errors), 5367 INIT_Q_COUNTER(resp_cqe_flush_error), 5368 INIT_Q_COUNTER(req_cqe_flush_error), 5369 }; 5370 5371 #define INIT_EXT_PPCNT_COUNTER(_name) \ 5372 { .name = #_name, .offset = \ 5373 MLX5_BYTE_OFF(ppcnt_reg, \ 5374 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)} 5375 5376 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { 5377 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), 5378 }; 5379 5380 static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) 5381 { 5382 return MLX5_ESWITCH_MANAGER(mdev) && 5383 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == 5384 MLX5_ESWITCH_OFFLOADS; 5385 } 5386 5387 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) 5388 { 5389 int num_cnt_ports; 5390 int i; 5391 5392 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 
1 : dev->num_ports; 5393 5394 for (i = 0; i < num_cnt_ports; i++) { 5395 if (dev->port[i].cnts.set_id_valid) 5396 mlx5_core_dealloc_q_counter(dev->mdev, 5397 dev->port[i].cnts.set_id); 5398 kfree(dev->port[i].cnts.names); 5399 kfree(dev->port[i].cnts.offsets); 5400 } 5401 } 5402 5403 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, 5404 struct mlx5_ib_counters *cnts) 5405 { 5406 u32 num_counters; 5407 5408 num_counters = ARRAY_SIZE(basic_q_cnts); 5409 5410 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) 5411 num_counters += ARRAY_SIZE(out_of_seq_q_cnts); 5412 5413 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) 5414 num_counters += ARRAY_SIZE(retrans_q_cnts); 5415 5416 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) 5417 num_counters += ARRAY_SIZE(extended_err_cnts); 5418 5419 cnts->num_q_counters = num_counters; 5420 5421 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5422 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts); 5423 num_counters += ARRAY_SIZE(cong_cnts); 5424 } 5425 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5426 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts); 5427 num_counters += ARRAY_SIZE(ext_ppcnt_cnts); 5428 } 5429 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL); 5430 if (!cnts->names) 5431 return -ENOMEM; 5432 5433 cnts->offsets = kcalloc(num_counters, 5434 sizeof(cnts->offsets), GFP_KERNEL); 5435 if (!cnts->offsets) 5436 goto err_names; 5437 5438 return 0; 5439 5440 err_names: 5441 kfree(cnts->names); 5442 cnts->names = NULL; 5443 return -ENOMEM; 5444 } 5445 5446 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, 5447 const char **names, 5448 size_t *offsets) 5449 { 5450 int i; 5451 int j = 0; 5452 5453 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { 5454 names[j] = basic_q_cnts[i].name; 5455 offsets[j] = basic_q_cnts[i].offset; 5456 } 5457 5458 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { 5459 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { 5460 names[j] = out_of_seq_q_cnts[i].name; 5461 offsets[j] = out_of_seq_q_cnts[i].offset; 5462 } 5463 } 5464 5465 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 5466 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { 5467 names[j] = retrans_q_cnts[i].name; 5468 offsets[j] = retrans_q_cnts[i].offset; 5469 } 5470 } 5471 5472 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { 5473 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { 5474 names[j] = extended_err_cnts[i].name; 5475 offsets[j] = extended_err_cnts[i].offset; 5476 } 5477 } 5478 5479 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5480 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { 5481 names[j] = cong_cnts[i].name; 5482 offsets[j] = cong_cnts[i].offset; 5483 } 5484 } 5485 5486 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5487 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) { 5488 names[j] = ext_ppcnt_cnts[i].name; 5489 offsets[j] = ext_ppcnt_cnts[i].offset; 5490 } 5491 } 5492 } 5493 5494 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) 5495 { 5496 int num_cnt_ports; 5497 int err = 0; 5498 int i; 5499 bool is_shared; 5500 5501 is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; 5502 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 
1 : dev->num_ports; 5503 5504 for (i = 0; i < num_cnt_ports; i++) { 5505 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); 5506 if (err) 5507 goto err_alloc; 5508 5509 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, 5510 dev->port[i].cnts.offsets); 5511 5512 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5513 &dev->port[i].cnts.set_id, 5514 is_shared ? 5515 MLX5_SHARED_RESOURCE_UID : 0); 5516 if (err) { 5517 mlx5_ib_warn(dev, 5518 "couldn't allocate queue counter for port %d, err %d\n", 5519 i + 1, err); 5520 goto err_alloc; 5521 } 5522 dev->port[i].cnts.set_id_valid = true; 5523 } 5524 return 0; 5525 5526 err_alloc: 5527 mlx5_ib_dealloc_counters(dev); 5528 return err; 5529 } 5530 5531 static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, 5532 u8 port_num) 5533 { 5534 return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : 5535 &dev->port[port_num].cnts; 5536 } 5537 5538 /** 5539 * mlx5_ib_get_counters_id - Returns counters id to use for device+port 5540 * @dev: Pointer to mlx5 IB device 5541 * @port_num: Zero based port number 5542 * 5543 * mlx5_ib_get_counters_id() Returns counters set id to use for given 5544 * device port combination in switchdev and non switchdev mode of the 5545 * parent device. 5546 */ 5547 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num) 5548 { 5549 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); 5550 5551 return cnts->set_id; 5552 } 5553 5554 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 5555 u8 port_num) 5556 { 5557 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5558 const struct mlx5_ib_counters *cnts; 5559 bool is_switchdev = is_mdev_switchdev_mode(dev->mdev); 5560 5561 if ((is_switchdev && port_num) || (!is_switchdev && !port_num)) 5562 return NULL; 5563 5564 cnts = get_counters(dev, port_num - 1); 5565 5566 return rdma_alloc_hw_stats_struct(cnts->names, 5567 cnts->num_q_counters + 5568 cnts->num_cong_counters + 5569 cnts->num_ext_ppcnt_counters, 5570 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5571 } 5572 5573 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, 5574 const struct mlx5_ib_counters *cnts, 5575 struct rdma_hw_stats *stats, 5576 u16 set_id) 5577 { 5578 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 5579 void *out; 5580 __be32 val; 5581 int ret, i; 5582 5583 out = kvzalloc(outlen, GFP_KERNEL); 5584 if (!out) 5585 return -ENOMEM; 5586 5587 ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen); 5588 if (ret) 5589 goto free; 5590 5591 for (i = 0; i < cnts->num_q_counters; i++) { 5592 val = *(__be32 *)(out + cnts->offsets[i]); 5593 stats->value[i] = (u64)be32_to_cpu(val); 5594 } 5595 5596 free: 5597 kvfree(out); 5598 return ret; 5599 } 5600 5601 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, 5602 const struct mlx5_ib_counters *cnts, 5603 struct rdma_hw_stats *stats) 5604 { 5605 int offset = cnts->num_q_counters + cnts->num_cong_counters; 5606 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 5607 int ret, i; 5608 void *out; 5609 5610 out = kvzalloc(sz, GFP_KERNEL); 5611 if (!out) 5612 return -ENOMEM; 5613 5614 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out); 5615 if (ret) 5616 goto free; 5617 5618 for (i = 0; i < cnts->num_ext_ppcnt_counters; i++) 5619 stats->value[i + offset] = 5620 be64_to_cpup((__be64 *)(out + 5621 cnts->offsets[i + offset])); 5622 free: 5623 kvfree(out); 5624 return ret; 5625 } 5626 5627 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 5628 struct rdma_hw_stats *stats, 5629 u8 port_num, int 
index) 5630 { 5631 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5632 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); 5633 struct mlx5_core_dev *mdev; 5634 int ret, num_counters; 5635 u8 mdev_port_num; 5636 5637 if (!stats) 5638 return -EINVAL; 5639 5640 num_counters = cnts->num_q_counters + 5641 cnts->num_cong_counters + 5642 cnts->num_ext_ppcnt_counters; 5643 5644 /* q_counters are per IB device, query the master mdev */ 5645 ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id); 5646 if (ret) 5647 return ret; 5648 5649 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5650 ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats); 5651 if (ret) 5652 return ret; 5653 } 5654 5655 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5656 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, 5657 &mdev_port_num); 5658 if (!mdev) { 5659 /* If port is not affiliated yet, its in down state 5660 * which doesn't have any counters yet, so it would be 5661 * zero. So no need to read from the HCA. 5662 */ 5663 goto done; 5664 } 5665 ret = mlx5_lag_query_cong_counters(dev->mdev, 5666 stats->value + 5667 cnts->num_q_counters, 5668 cnts->num_cong_counters, 5669 cnts->offsets + 5670 cnts->num_q_counters); 5671 5672 mlx5_ib_put_native_port_mdev(dev, port_num); 5673 if (ret) 5674 return ret; 5675 } 5676 5677 done: 5678 return num_counters; 5679 } 5680 5681 static struct rdma_hw_stats * 5682 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) 5683 { 5684 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5685 const struct mlx5_ib_counters *cnts = 5686 get_counters(dev, counter->port - 1); 5687 5688 /* Q counters are in the beginning of all counters */ 5689 return rdma_alloc_hw_stats_struct(cnts->names, 5690 cnts->num_q_counters, 5691 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5692 } 5693 5694 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) 5695 { 5696 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5697 const struct mlx5_ib_counters *cnts = 5698 get_counters(dev, counter->port - 1); 5699 5700 return mlx5_ib_query_q_counters(dev->mdev, cnts, 5701 counter->stats, counter->id); 5702 } 5703 5704 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, 5705 struct ib_qp *qp) 5706 { 5707 struct mlx5_ib_dev *dev = to_mdev(qp->device); 5708 u16 cnt_set_id = 0; 5709 int err; 5710 5711 if (!counter->id) { 5712 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5713 &cnt_set_id, 5714 MLX5_SHARED_RESOURCE_UID); 5715 if (err) 5716 return err; 5717 counter->id = cnt_set_id; 5718 } 5719 5720 err = mlx5_ib_qp_set_counter(qp, counter); 5721 if (err) 5722 goto fail_set_counter; 5723 5724 return 0; 5725 5726 fail_set_counter: 5727 mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id); 5728 counter->id = 0; 5729 5730 return err; 5731 } 5732 5733 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) 5734 { 5735 return mlx5_ib_qp_set_counter(qp, NULL); 5736 } 5737 5738 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) 5739 { 5740 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5741 5742 return mlx5_core_dealloc_q_counter(dev->mdev, counter->id); 5743 } 5744 5745 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, 5746 enum rdma_netdev_t type, 5747 struct rdma_netdev_alloc_params *params) 5748 { 5749 if (type != RDMA_NETDEV_IPOIB) 5750 return -EOPNOTSUPP; 5751 5752 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); 5753 } 5754 5755 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev) 5756 { 
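/* Drop the debugfs directory created by delay_drop_debugfs_init(), if any. */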
5757 if (!dev->delay_drop.dir_debugfs) 5758 return; 5759 debugfs_remove_recursive(dev->delay_drop.dir_debugfs); 5760 dev->delay_drop.dir_debugfs = NULL; 5761 } 5762 5763 static void cancel_delay_drop(struct mlx5_ib_dev *dev) 5764 { 5765 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5766 return; 5767 5768 cancel_work_sync(&dev->delay_drop.delay_drop_work); 5769 delay_drop_debugfs_cleanup(dev); 5770 } 5771 5772 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf, 5773 size_t count, loff_t *pos) 5774 { 5775 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5776 char lbuf[20]; 5777 int len; 5778 5779 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout); 5780 return simple_read_from_buffer(buf, count, pos, lbuf, len); 5781 } 5782 5783 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf, 5784 size_t count, loff_t *pos) 5785 { 5786 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5787 u32 timeout; 5788 u32 var; 5789 5790 if (kstrtouint_from_user(buf, count, 0, &var)) 5791 return -EFAULT; 5792 5793 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 5794 1000); 5795 if (timeout != var) 5796 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n", 5797 timeout); 5798 5799 delay_drop->timeout = timeout; 5800 5801 return count; 5802 } 5803 5804 static const struct file_operations fops_delay_drop_timeout = { 5805 .owner = THIS_MODULE, 5806 .open = simple_open, 5807 .write = delay_drop_timeout_write, 5808 .read = delay_drop_timeout_read, 5809 }; 5810 5811 static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev) 5812 { 5813 struct dentry *root; 5814 5815 if (!mlx5_debugfs_root) 5816 return; 5817 5818 root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); 5819 dev->delay_drop.dir_debugfs = root; 5820 5821 debugfs_create_atomic_t("num_timeout_events", 0400, root, 5822 &dev->delay_drop.events_cnt); 5823 debugfs_create_atomic_t("num_rqs", 0400, root, 5824 &dev->delay_drop.rqs_cnt); 5825 debugfs_create_file("timeout", 0600, root, &dev->delay_drop, 5826 &fops_delay_drop_timeout); 5827 } 5828 5829 static void init_delay_drop(struct mlx5_ib_dev *dev) 5830 { 5831 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5832 return; 5833 5834 mutex_init(&dev->delay_drop.lock); 5835 dev->delay_drop.dev = dev; 5836 dev->delay_drop.activate = false; 5837 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; 5838 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5839 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5840 atomic_set(&dev->delay_drop.events_cnt, 0); 5841 5842 delay_drop_debugfs_init(dev); 5843 } 5844 5845 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5846 struct mlx5_ib_multiport_info *mpi) 5847 { 5848 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5849 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5850 int comps; 5851 int err; 5852 int i; 5853 5854 lockdep_assert_held(&mlx5_ib_multiport_mutex); 5855 5856 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5857 5858 spin_lock(&port->mp.mpi_lock); 5859 if (!mpi->ibdev) { 5860 spin_unlock(&port->mp.mpi_lock); 5861 return; 5862 } 5863 5864 mpi->ibdev = NULL; 5865 5866 spin_unlock(&port->mp.mpi_lock); 5867 if (mpi->mdev_events.notifier_call) 5868 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); 5869 mpi->mdev_events.notifier_call = NULL; 5870 mlx5_remove_netdev_notifier(ibdev, port_num); 5871 spin_lock(&port->mp.mpi_lock); 5872 
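/* Wait for outstanding references on the slave mdev (presumably taken through mlx5_ib_get_native_port_mdev()) to be dropped before handing the port back to the unaffiliated list. */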
5873 comps = mpi->mdev_refcnt; 5874 if (comps) { 5875 mpi->unaffiliate = true; 5876 init_completion(&mpi->unref_comp); 5877 spin_unlock(&port->mp.mpi_lock); 5878 5879 for (i = 0; i < comps; i++) 5880 wait_for_completion(&mpi->unref_comp); 5881 5882 spin_lock(&port->mp.mpi_lock); 5883 mpi->unaffiliate = false; 5884 } 5885 5886 port->mp.mpi = NULL; 5887 5888 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); 5889 5890 spin_unlock(&port->mp.mpi_lock); 5891 5892 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev); 5893 5894 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1); 5895 /* Log an error, still needed to cleanup the pointers and add 5896 * it back to the list. 5897 */ 5898 if (err) 5899 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n", 5900 port_num + 1); 5901 5902 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; 5903 } 5904 5905 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, 5906 struct mlx5_ib_multiport_info *mpi) 5907 { 5908 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5909 int err; 5910 5911 lockdep_assert_held(&mlx5_ib_multiport_mutex); 5912 5913 spin_lock(&ibdev->port[port_num].mp.mpi_lock); 5914 if (ibdev->port[port_num].mp.mpi) { 5915 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", 5916 port_num + 1); 5917 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5918 return false; 5919 } 5920 5921 ibdev->port[port_num].mp.mpi = mpi; 5922 mpi->ibdev = ibdev; 5923 mpi->mdev_events.notifier_call = NULL; 5924 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5925 5926 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); 5927 if (err) 5928 goto unbind; 5929 5930 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev)); 5931 if (err) 5932 goto unbind; 5933 5934 err = mlx5_add_netdev_notifier(ibdev, port_num); 5935 if (err) { 5936 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n", 5937 port_num + 1); 5938 goto unbind; 5939 } 5940 5941 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port; 5942 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events); 5943 5944 mlx5_ib_init_cong_debugfs(ibdev, port_num); 5945 5946 return true; 5947 5948 unbind: 5949 mlx5_ib_unbind_slave_port(ibdev, mpi); 5950 return false; 5951 } 5952 5953 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) 5954 { 5955 int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 5956 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 5957 port_num + 1); 5958 struct mlx5_ib_multiport_info *mpi; 5959 int err; 5960 int i; 5961 5962 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 5963 return 0; 5964 5965 err = mlx5_query_nic_vport_system_image_guid(dev->mdev, 5966 &dev->sys_image_guid); 5967 if (err) 5968 return err; 5969 5970 err = mlx5_nic_vport_enable_roce(dev->mdev); 5971 if (err) 5972 return err; 5973 5974 mutex_lock(&mlx5_ib_multiport_mutex); 5975 for (i = 0; i < dev->num_ports; i++) { 5976 bool bound = false; 5977 5978 /* build a stub multiport info struct for the native port. 
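	 * The native port is never on the unaffiliated list; it gets a
	 * locally allocated mpi marked is_master that points back at this
	 * ibdev and is freed in mlx5_ib_cleanup_multiport_master().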
*/ 5979 if (i == port_num) { 5980 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); 5981 if (!mpi) { 5982 mutex_unlock(&mlx5_ib_multiport_mutex); 5983 mlx5_nic_vport_disable_roce(dev->mdev); 5984 return -ENOMEM; 5985 } 5986 5987 mpi->is_master = true; 5988 mpi->mdev = dev->mdev; 5989 mpi->sys_image_guid = dev->sys_image_guid; 5990 dev->port[i].mp.mpi = mpi; 5991 mpi->ibdev = dev; 5992 mpi = NULL; 5993 continue; 5994 } 5995 5996 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list, 5997 list) { 5998 if (dev->sys_image_guid == mpi->sys_image_guid && 5999 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) { 6000 bound = mlx5_ib_bind_slave_port(dev, mpi); 6001 } 6002 6003 if (bound) { 6004 dev_dbg(mpi->mdev->device, 6005 "removing port from unaffiliated list.\n"); 6006 mlx5_ib_dbg(dev, "port %d bound\n", i + 1); 6007 list_del(&mpi->list); 6008 break; 6009 } 6010 } 6011 if (!bound) { 6012 get_port_caps(dev, i + 1); 6013 mlx5_ib_dbg(dev, "no free port found for port %d\n", 6014 i + 1); 6015 } 6016 } 6017 6018 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list); 6019 mutex_unlock(&mlx5_ib_multiport_mutex); 6020 return err; 6021 } 6022 6023 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev) 6024 { 6025 int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 6026 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 6027 port_num + 1); 6028 int i; 6029 6030 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 6031 return; 6032 6033 mutex_lock(&mlx5_ib_multiport_mutex); 6034 for (i = 0; i < dev->num_ports; i++) { 6035 if (dev->port[i].mp.mpi) { 6036 /* Destroy the native port stub */ 6037 if (i == port_num) { 6038 kfree(dev->port[i].mp.mpi); 6039 dev->port[i].mp.mpi = NULL; 6040 } else { 6041 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1); 6042 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi); 6043 } 6044 } 6045 } 6046 6047 mlx5_ib_dbg(dev, "removing from devlist\n"); 6048 list_del(&dev->ib_dev_list); 6049 mutex_unlock(&mlx5_ib_multiport_mutex); 6050 6051 mlx5_nic_vport_disable_roce(dev->mdev); 6052 } 6053 6054 ADD_UVERBS_ATTRIBUTES_SIMPLE( 6055 mlx5_ib_dm, 6056 UVERBS_OBJECT_DM, 6057 UVERBS_METHOD_DM_ALLOC, 6058 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 6059 UVERBS_ATTR_TYPE(u64), 6060 UA_MANDATORY), 6061 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 6062 UVERBS_ATTR_TYPE(u16), 6063 UA_OPTIONAL), 6064 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE, 6065 enum mlx5_ib_uapi_dm_type, 6066 UA_OPTIONAL)); 6067 6068 ADD_UVERBS_ATTRIBUTES_SIMPLE( 6069 mlx5_ib_flow_action, 6070 UVERBS_OBJECT_FLOW_ACTION, 6071 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 6072 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 6073 enum mlx5_ib_uapi_flow_action_flags)); 6074 6075 static const struct uapi_definition mlx5_ib_defs[] = { 6076 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) 6077 UAPI_DEF_CHAIN(mlx5_ib_devx_defs), 6078 UAPI_DEF_CHAIN(mlx5_ib_flow_defs), 6079 #endif 6080 6081 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 6082 &mlx5_ib_flow_action), 6083 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm), 6084 {} 6085 }; 6086 6087 static int mlx5_ib_read_counters(struct ib_counters *counters, 6088 struct ib_counters_read_attr *read_attr, 6089 struct uverbs_attr_bundle *attrs) 6090 { 6091 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 6092 struct mlx5_read_counters_attr mread_attr = {}; 6093 struct mlx5_ib_flow_counters_desc *desc; 6094 int ret, i; 6095 6096 mutex_lock(&mcounters->mcntrs_mutex); 6097 if 
(mcounters->cntrs_max_index > read_attr->ncounters) { 6098 ret = -EINVAL; 6099 goto err_bound; 6100 } 6101 6102 mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64), 6103 GFP_KERNEL); 6104 if (!mread_attr.out) { 6105 ret = -ENOMEM; 6106 goto err_bound; 6107 } 6108 6109 mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl; 6110 mread_attr.flags = read_attr->flags; 6111 ret = mcounters->read_counters(counters->device, &mread_attr); 6112 if (ret) 6113 goto err_read; 6114 6115 /* do the pass over the counters data array to assign according to the 6116 * descriptions and indexing pairs 6117 */ 6118 desc = mcounters->counters_data; 6119 for (i = 0; i < mcounters->ncounters; i++) 6120 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description]; 6121 6122 err_read: 6123 kfree(mread_attr.out); 6124 err_bound: 6125 mutex_unlock(&mcounters->mcntrs_mutex); 6126 return ret; 6127 } 6128 6129 static int mlx5_ib_destroy_counters(struct ib_counters *counters) 6130 { 6131 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 6132 6133 counters_clear_description(counters); 6134 if (mcounters->hw_cntrs_hndl) 6135 mlx5_fc_destroy(to_mdev(counters->device)->mdev, 6136 mcounters->hw_cntrs_hndl); 6137 6138 kfree(mcounters); 6139 6140 return 0; 6141 } 6142 6143 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device, 6144 struct uverbs_attr_bundle *attrs) 6145 { 6146 struct mlx5_ib_mcounters *mcounters; 6147 6148 mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL); 6149 if (!mcounters) 6150 return ERR_PTR(-ENOMEM); 6151 6152 mutex_init(&mcounters->mcntrs_mutex); 6153 6154 return &mcounters->ibcntrs; 6155 } 6156 6157 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) 6158 { 6159 mlx5_ib_cleanup_multiport_master(dev); 6160 WARN_ON(!xa_empty(&dev->odp_mkeys)); 6161 cleanup_srcu_struct(&dev->odp_srcu); 6162 6163 WARN_ON(!xa_empty(&dev->sig_mrs)); 6164 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES)); 6165 } 6166 6167 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) 6168 { 6169 struct mlx5_core_dev *mdev = dev->mdev; 6170 int err; 6171 int i; 6172 6173 for (i = 0; i < dev->num_ports; i++) { 6174 spin_lock_init(&dev->port[i].mp.mpi_lock); 6175 rwlock_init(&dev->port[i].roce.netdev_lock); 6176 dev->port[i].roce.dev = dev; 6177 dev->port[i].roce.native_port_num = i + 1; 6178 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6179 } 6180 6181 mlx5_ib_internal_fill_odp_caps(dev); 6182 6183 err = mlx5_ib_init_multiport_master(dev); 6184 if (err) 6185 return err; 6186 6187 err = set_has_smi_cap(dev); 6188 if (err) 6189 return err; 6190 6191 if (!mlx5_core_mp_enabled(mdev)) { 6192 for (i = 1; i <= dev->num_ports; i++) { 6193 err = get_port_caps(dev, i); 6194 if (err) 6195 break; 6196 } 6197 } else { 6198 err = get_port_caps(dev, mlx5_core_native_port_num(mdev)); 6199 } 6200 if (err) 6201 goto err_mp; 6202 6203 if (mlx5_use_mad_ifc(dev)) 6204 get_ext_port_caps(dev); 6205 6206 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 6207 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 6208 dev->ib_dev.phys_port_cnt = dev->num_ports; 6209 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); 6210 dev->ib_dev.dev.parent = mdev->device; 6211 6212 mutex_init(&dev->cap_mask_mutex); 6213 INIT_LIST_HEAD(&dev->qp_list); 6214 spin_lock_init(&dev->reset_flow_resource_lock); 6215 xa_init(&dev->odp_mkeys); 6216 xa_init(&dev->sig_mrs); 6217 6218 spin_lock_init(&dev->dm.lock); 6219 dev->dm.dev = mdev; 6220 6221 err = 
init_srcu_struct(&dev->odp_srcu); 6222 if (err) 6223 goto err_mp; 6224 6225 return 0; 6226 6227 err_mp: 6228 mlx5_ib_cleanup_multiport_master(dev); 6229 6230 return -ENOMEM; 6231 } 6232 6233 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev) 6234 { 6235 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); 6236 6237 if (!dev->flow_db) 6238 return -ENOMEM; 6239 6240 mutex_init(&dev->flow_db->lock); 6241 6242 return 0; 6243 } 6244 6245 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) 6246 { 6247 kfree(dev->flow_db); 6248 } 6249 6250 static const struct ib_device_ops mlx5_ib_dev_ops = { 6251 .owner = THIS_MODULE, 6252 .driver_id = RDMA_DRIVER_MLX5, 6253 .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION, 6254 6255 .add_gid = mlx5_ib_add_gid, 6256 .alloc_mr = mlx5_ib_alloc_mr, 6257 .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity, 6258 .alloc_pd = mlx5_ib_alloc_pd, 6259 .alloc_ucontext = mlx5_ib_alloc_ucontext, 6260 .attach_mcast = mlx5_ib_mcg_attach, 6261 .check_mr_status = mlx5_ib_check_mr_status, 6262 .create_ah = mlx5_ib_create_ah, 6263 .create_counters = mlx5_ib_create_counters, 6264 .create_cq = mlx5_ib_create_cq, 6265 .create_flow = mlx5_ib_create_flow, 6266 .create_qp = mlx5_ib_create_qp, 6267 .create_srq = mlx5_ib_create_srq, 6268 .dealloc_pd = mlx5_ib_dealloc_pd, 6269 .dealloc_ucontext = mlx5_ib_dealloc_ucontext, 6270 .del_gid = mlx5_ib_del_gid, 6271 .dereg_mr = mlx5_ib_dereg_mr, 6272 .destroy_ah = mlx5_ib_destroy_ah, 6273 .destroy_counters = mlx5_ib_destroy_counters, 6274 .destroy_cq = mlx5_ib_destroy_cq, 6275 .destroy_flow = mlx5_ib_destroy_flow, 6276 .destroy_flow_action = mlx5_ib_destroy_flow_action, 6277 .destroy_qp = mlx5_ib_destroy_qp, 6278 .destroy_srq = mlx5_ib_destroy_srq, 6279 .detach_mcast = mlx5_ib_mcg_detach, 6280 .disassociate_ucontext = mlx5_ib_disassociate_ucontext, 6281 .drain_rq = mlx5_ib_drain_rq, 6282 .drain_sq = mlx5_ib_drain_sq, 6283 .enable_driver = mlx5_ib_enable_driver, 6284 .fill_res_entry = mlx5_ib_fill_res_entry, 6285 .fill_stat_entry = mlx5_ib_fill_stat_entry, 6286 .get_dev_fw_str = get_dev_fw_str, 6287 .get_dma_mr = mlx5_ib_get_dma_mr, 6288 .get_link_layer = mlx5_ib_port_link_layer, 6289 .map_mr_sg = mlx5_ib_map_mr_sg, 6290 .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi, 6291 .mmap = mlx5_ib_mmap, 6292 .mmap_free = mlx5_ib_mmap_free, 6293 .modify_cq = mlx5_ib_modify_cq, 6294 .modify_device = mlx5_ib_modify_device, 6295 .modify_port = mlx5_ib_modify_port, 6296 .modify_qp = mlx5_ib_modify_qp, 6297 .modify_srq = mlx5_ib_modify_srq, 6298 .poll_cq = mlx5_ib_poll_cq, 6299 .post_recv = mlx5_ib_post_recv, 6300 .post_send = mlx5_ib_post_send, 6301 .post_srq_recv = mlx5_ib_post_srq_recv, 6302 .process_mad = mlx5_ib_process_mad, 6303 .query_ah = mlx5_ib_query_ah, 6304 .query_device = mlx5_ib_query_device, 6305 .query_gid = mlx5_ib_query_gid, 6306 .query_pkey = mlx5_ib_query_pkey, 6307 .query_qp = mlx5_ib_query_qp, 6308 .query_srq = mlx5_ib_query_srq, 6309 .read_counters = mlx5_ib_read_counters, 6310 .reg_user_mr = mlx5_ib_reg_user_mr, 6311 .req_notify_cq = mlx5_ib_arm_cq, 6312 .rereg_user_mr = mlx5_ib_rereg_user_mr, 6313 .resize_cq = mlx5_ib_resize_cq, 6314 6315 INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), 6316 INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), 6317 INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), 6318 INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), 6319 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), 6320 }; 6321 6322 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = { 6323 .create_flow_action_esp = 
mlx5_ib_create_flow_action_esp, 6324 .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, 6325 }; 6326 6327 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = { 6328 .rdma_netdev_get_params = mlx5_ib_rn_get_params, 6329 }; 6330 6331 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = { 6332 .get_vf_config = mlx5_ib_get_vf_config, 6333 .get_vf_guid = mlx5_ib_get_vf_guid, 6334 .get_vf_stats = mlx5_ib_get_vf_stats, 6335 .set_vf_guid = mlx5_ib_set_vf_guid, 6336 .set_vf_link_state = mlx5_ib_set_vf_link_state, 6337 }; 6338 6339 static const struct ib_device_ops mlx5_ib_dev_mw_ops = { 6340 .alloc_mw = mlx5_ib_alloc_mw, 6341 .dealloc_mw = mlx5_ib_dealloc_mw, 6342 }; 6343 6344 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = { 6345 .alloc_xrcd = mlx5_ib_alloc_xrcd, 6346 .dealloc_xrcd = mlx5_ib_dealloc_xrcd, 6347 }; 6348 6349 static const struct ib_device_ops mlx5_ib_dev_dm_ops = { 6350 .alloc_dm = mlx5_ib_alloc_dm, 6351 .dealloc_dm = mlx5_ib_dealloc_dm, 6352 .reg_dm_mr = mlx5_ib_reg_dm_mr, 6353 }; 6354 6355 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) 6356 { 6357 struct mlx5_core_dev *mdev = dev->mdev; 6358 int err; 6359 6360 dev->ib_dev.uverbs_cmd_mask = 6361 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 6362 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 6363 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 6364 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 6365 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 6366 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 6367 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 6368 (1ull << IB_USER_VERBS_CMD_REG_MR) | 6369 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 6370 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 6371 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 6372 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 6373 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 6374 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 6375 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 6376 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 6377 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 6378 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 6379 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 6380 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 6381 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 6382 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 6383 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 6384 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 6385 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 6386 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 6387 dev->ib_dev.uverbs_ex_cmd_mask = 6388 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 6389 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 6390 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) | 6391 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) | 6392 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) | 6393 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 6394 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); 6395 6396 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && 6397 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) 6398 ib_set_device_ops(&dev->ib_dev, 6399 &mlx5_ib_dev_ipoib_enhanced_ops); 6400 6401 if (mlx5_core_is_pf(mdev)) 6402 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops); 6403 6404 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); 6405 6406 if (MLX5_CAP_GEN(mdev, imaicl)) { 6407 dev->ib_dev.uverbs_cmd_mask |= 6408 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 6409 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 6410 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops); 6411 } 6412 6413 if (MLX5_CAP_GEN(mdev, xrc)) { 6414 dev->ib_dev.uverbs_cmd_mask |= 6415 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 6416 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 6417 
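		/* XRCD alloc/dealloc verbs are wired up only when the device
		 * reports the xrc capability.
		 */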
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops); 6418 } 6419 6420 if (MLX5_CAP_DEV_MEM(mdev, memic) || 6421 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & 6422 MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) 6423 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops); 6424 6425 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 6426 MLX5_ACCEL_IPSEC_CAP_DEVICE) 6427 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops); 6428 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops); 6429 6430 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) 6431 dev->ib_dev.driver_def = mlx5_ib_defs; 6432 6433 err = init_node_data(dev); 6434 if (err) 6435 return err; 6436 6437 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 6438 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || 6439 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 6440 mutex_init(&dev->lb.mutex); 6441 6442 dev->ib_dev.use_cq_dim = true; 6443 6444 return 0; 6445 } 6446 6447 static const struct ib_device_ops mlx5_ib_dev_port_ops = { 6448 .get_port_immutable = mlx5_port_immutable, 6449 .query_port = mlx5_ib_query_port, 6450 }; 6451 6452 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) 6453 { 6454 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops); 6455 return 0; 6456 } 6457 6458 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = { 6459 .get_port_immutable = mlx5_port_rep_immutable, 6460 .query_port = mlx5_ib_rep_query_port, 6461 }; 6462 6463 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev) 6464 { 6465 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); 6466 return 0; 6467 } 6468 6469 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = { 6470 .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table, 6471 .create_wq = mlx5_ib_create_wq, 6472 .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table, 6473 .destroy_wq = mlx5_ib_destroy_wq, 6474 .get_netdev = mlx5_ib_get_netdev, 6475 .modify_wq = mlx5_ib_modify_wq, 6476 }; 6477 6478 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev) 6479 { 6480 u8 port_num; 6481 6482 dev->ib_dev.uverbs_ex_cmd_mask |= 6483 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 6484 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 6485 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 6486 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 6487 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 6488 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops); 6489 6490 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 6491 6492 /* Register only for native ports */ 6493 return mlx5_add_netdev_notifier(dev, port_num); 6494 } 6495 6496 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev) 6497 { 6498 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 6499 6500 mlx5_remove_netdev_notifier(dev, port_num); 6501 } 6502 6503 static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev) 6504 { 6505 struct mlx5_core_dev *mdev = dev->mdev; 6506 enum rdma_link_layer ll; 6507 int port_type_cap; 6508 int err = 0; 6509 6510 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 6511 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 6512 6513 if (ll == IB_LINK_LAYER_ETHERNET) 6514 err = mlx5_ib_stage_common_roce_init(dev); 6515 6516 return err; 6517 } 6518 6519 static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev) 6520 { 6521 mlx5_ib_stage_common_roce_cleanup(dev); 6522 } 6523 6524 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev) 6525 { 6526 struct mlx5_core_dev *mdev = dev->mdev; 6527 enum rdma_link_layer ll; 6528 
int port_type_cap; 6529 int err; 6530 6531 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 6532 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 6533 6534 if (ll == IB_LINK_LAYER_ETHERNET) { 6535 err = mlx5_ib_stage_common_roce_init(dev); 6536 if (err) 6537 return err; 6538 6539 err = mlx5_enable_eth(dev); 6540 if (err) 6541 goto cleanup; 6542 } 6543 6544 return 0; 6545 cleanup: 6546 mlx5_ib_stage_common_roce_cleanup(dev); 6547 6548 return err; 6549 } 6550 6551 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev) 6552 { 6553 struct mlx5_core_dev *mdev = dev->mdev; 6554 enum rdma_link_layer ll; 6555 int port_type_cap; 6556 6557 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 6558 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 6559 6560 if (ll == IB_LINK_LAYER_ETHERNET) { 6561 mlx5_disable_eth(dev); 6562 mlx5_ib_stage_common_roce_cleanup(dev); 6563 } 6564 } 6565 6566 static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev) 6567 { 6568 return create_dev_resources(&dev->devr); 6569 } 6570 6571 static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) 6572 { 6573 destroy_dev_resources(&dev->devr); 6574 } 6575 6576 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6577 { 6578 return mlx5_ib_odp_init_one(dev); 6579 } 6580 6581 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev) 6582 { 6583 mlx5_ib_odp_cleanup_one(dev); 6584 } 6585 6586 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = { 6587 .alloc_hw_stats = mlx5_ib_alloc_hw_stats, 6588 .get_hw_stats = mlx5_ib_get_hw_stats, 6589 .counter_bind_qp = mlx5_ib_counter_bind_qp, 6590 .counter_unbind_qp = mlx5_ib_counter_unbind_qp, 6591 .counter_dealloc = mlx5_ib_counter_dealloc, 6592 .counter_alloc_stats = mlx5_ib_counter_alloc_stats, 6593 .counter_update_stats = mlx5_ib_counter_update_stats, 6594 }; 6595 6596 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) 6597 { 6598 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { 6599 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops); 6600 6601 return mlx5_ib_alloc_counters(dev); 6602 } 6603 6604 return 0; 6605 } 6606 6607 static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev) 6608 { 6609 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) 6610 mlx5_ib_dealloc_counters(dev); 6611 } 6612 6613 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev) 6614 { 6615 mlx5_ib_init_cong_debugfs(dev, 6616 mlx5_core_native_port_num(dev->mdev) - 1); 6617 return 0; 6618 } 6619 6620 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) 6621 { 6622 mlx5_ib_cleanup_cong_debugfs(dev, 6623 mlx5_core_native_port_num(dev->mdev) - 1); 6624 } 6625 6626 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) 6627 { 6628 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); 6629 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); 6630 } 6631 6632 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) 6633 { 6634 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); 6635 } 6636 6637 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) 6638 { 6639 int err; 6640 6641 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); 6642 if (err) 6643 return err; 6644 6645 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); 6646 if (err) 6647 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6648 6649 return err; 6650 } 6651 6652 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) 6653 { 6654 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); 6655 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6656 } 6657 6658 static int
mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) 6659 { 6660 const char *name; 6661 6662 rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group); 6663 if (!mlx5_lag_is_roce(dev->mdev)) 6664 name = "mlx5_%d"; 6665 else 6666 name = "mlx5_bond_%d"; 6667 return ib_register_device(&dev->ib_dev, name); 6668 } 6669 6670 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) 6671 { 6672 destroy_umrc_res(dev); 6673 } 6674 6675 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) 6676 { 6677 ib_unregister_device(&dev->ib_dev); 6678 } 6679 6680 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) 6681 { 6682 return create_umr_res(dev); 6683 } 6684 6685 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) 6686 { 6687 init_delay_drop(dev); 6688 6689 return 0; 6690 } 6691 6692 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) 6693 { 6694 cancel_delay_drop(dev); 6695 } 6696 6697 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) 6698 { 6699 dev->mdev_events.notifier_call = mlx5_ib_event; 6700 mlx5_notifier_register(dev->mdev, &dev->mdev_events); 6701 return 0; 6702 } 6703 6704 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) 6705 { 6706 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); 6707 } 6708 6709 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev) 6710 { 6711 int uid; 6712 6713 uid = mlx5_ib_devx_create(dev, false); 6714 if (uid > 0) { 6715 dev->devx_whitelist_uid = uid; 6716 mlx5_ib_devx_init_event_table(dev); 6717 } 6718 6719 return 0; 6720 } 6721 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev) 6722 { 6723 if (dev->devx_whitelist_uid) { 6724 mlx5_ib_devx_cleanup_event_table(dev); 6725 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); 6726 } 6727 } 6728 6729 int mlx5_ib_enable_driver(struct ib_device *dev) 6730 { 6731 struct mlx5_ib_dev *mdev = to_mdev(dev); 6732 int ret; 6733 6734 ret = mlx5_ib_test_wc(mdev); 6735 mlx5_ib_dbg(mdev, "Write-Combining %s", 6736 mdev->wc_support ? 
"supported" : "not supported"); 6737 6738 return ret; 6739 } 6740 6741 void __mlx5_ib_remove(struct mlx5_ib_dev *dev, 6742 const struct mlx5_ib_profile *profile, 6743 int stage) 6744 { 6745 /* Number of stages to cleanup */ 6746 while (stage) { 6747 stage--; 6748 if (profile->stage[stage].cleanup) 6749 profile->stage[stage].cleanup(dev); 6750 } 6751 6752 kfree(dev->port); 6753 ib_dealloc_device(&dev->ib_dev); 6754 } 6755 6756 void *__mlx5_ib_add(struct mlx5_ib_dev *dev, 6757 const struct mlx5_ib_profile *profile) 6758 { 6759 int err; 6760 int i; 6761 6762 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { 6763 if (profile->stage[i].init) { 6764 err = profile->stage[i].init(dev); 6765 if (err) 6766 goto err_out; 6767 } 6768 } 6769 6770 dev->profile = profile; 6771 dev->ib_active = true; 6772 6773 return dev; 6774 6775 err_out: 6776 __mlx5_ib_remove(dev, profile, i); 6777 6778 return NULL; 6779 } 6780 6781 static const struct mlx5_ib_profile pf_profile = { 6782 STAGE_CREATE(MLX5_IB_STAGE_INIT, 6783 mlx5_ib_stage_init_init, 6784 mlx5_ib_stage_init_cleanup), 6785 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, 6786 mlx5_ib_stage_flow_db_init, 6787 mlx5_ib_stage_flow_db_cleanup), 6788 STAGE_CREATE(MLX5_IB_STAGE_CAPS, 6789 mlx5_ib_stage_caps_init, 6790 NULL), 6791 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, 6792 mlx5_ib_stage_non_default_cb, 6793 NULL), 6794 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6795 mlx5_ib_stage_roce_init, 6796 mlx5_ib_stage_roce_cleanup), 6797 STAGE_CREATE(MLX5_IB_STAGE_SRQ, 6798 mlx5_init_srq_table, 6799 mlx5_cleanup_srq_table), 6800 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6801 mlx5_ib_stage_dev_res_init, 6802 mlx5_ib_stage_dev_res_cleanup), 6803 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, 6804 mlx5_ib_stage_dev_notifier_init, 6805 mlx5_ib_stage_dev_notifier_cleanup), 6806 STAGE_CREATE(MLX5_IB_STAGE_ODP, 6807 mlx5_ib_stage_odp_init, 6808 mlx5_ib_stage_odp_cleanup), 6809 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6810 mlx5_ib_stage_counters_init, 6811 mlx5_ib_stage_counters_cleanup), 6812 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, 6813 mlx5_ib_stage_cong_debugfs_init, 6814 mlx5_ib_stage_cong_debugfs_cleanup), 6815 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6816 mlx5_ib_stage_uar_init, 6817 mlx5_ib_stage_uar_cleanup), 6818 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6819 mlx5_ib_stage_bfrag_init, 6820 mlx5_ib_stage_bfrag_cleanup), 6821 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6822 NULL, 6823 mlx5_ib_stage_pre_ib_reg_umr_cleanup), 6824 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, 6825 mlx5_ib_stage_devx_init, 6826 mlx5_ib_stage_devx_cleanup), 6827 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6828 mlx5_ib_stage_ib_reg_init, 6829 mlx5_ib_stage_ib_reg_cleanup), 6830 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6831 mlx5_ib_stage_post_ib_reg_umr_init, 6832 NULL), 6833 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 6834 mlx5_ib_stage_delay_drop_init, 6835 mlx5_ib_stage_delay_drop_cleanup), 6836 }; 6837 6838 const struct mlx5_ib_profile raw_eth_profile = { 6839 STAGE_CREATE(MLX5_IB_STAGE_INIT, 6840 mlx5_ib_stage_init_init, 6841 mlx5_ib_stage_init_cleanup), 6842 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB, 6843 mlx5_ib_stage_flow_db_init, 6844 mlx5_ib_stage_flow_db_cleanup), 6845 STAGE_CREATE(MLX5_IB_STAGE_CAPS, 6846 mlx5_ib_stage_caps_init, 6847 NULL), 6848 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, 6849 mlx5_ib_stage_raw_eth_non_default_cb, 6850 NULL), 6851 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6852 mlx5_ib_stage_raw_eth_roce_init, 6853 mlx5_ib_stage_raw_eth_roce_cleanup), 6854 STAGE_CREATE(MLX5_IB_STAGE_SRQ, 6855 mlx5_init_srq_table, 6856 mlx5_cleanup_srq_table), 
6857 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6858 mlx5_ib_stage_dev_res_init, 6859 mlx5_ib_stage_dev_res_cleanup), 6860 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, 6861 mlx5_ib_stage_dev_notifier_init, 6862 mlx5_ib_stage_dev_notifier_cleanup), 6863 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6864 mlx5_ib_stage_counters_init, 6865 mlx5_ib_stage_counters_cleanup), 6866 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6867 mlx5_ib_stage_uar_init, 6868 mlx5_ib_stage_uar_cleanup), 6869 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6870 mlx5_ib_stage_bfrag_init, 6871 mlx5_ib_stage_bfrag_cleanup), 6872 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6873 NULL, 6874 mlx5_ib_stage_pre_ib_reg_umr_cleanup), 6875 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, 6876 mlx5_ib_stage_devx_init, 6877 mlx5_ib_stage_devx_cleanup), 6878 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6879 mlx5_ib_stage_ib_reg_init, 6880 mlx5_ib_stage_ib_reg_cleanup), 6881 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6882 mlx5_ib_stage_post_ib_reg_umr_init, 6883 NULL), 6884 }; 6885 6886 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) 6887 { 6888 struct mlx5_ib_multiport_info *mpi; 6889 struct mlx5_ib_dev *dev; 6890 bool bound = false; 6891 int err; 6892 6893 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); 6894 if (!mpi) 6895 return NULL; 6896 6897 mpi->mdev = mdev; 6898 6899 err = mlx5_query_nic_vport_system_image_guid(mdev, 6900 &mpi->sys_image_guid); 6901 if (err) { 6902 kfree(mpi); 6903 return NULL; 6904 } 6905 6906 mutex_lock(&mlx5_ib_multiport_mutex); 6907 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) { 6908 if (dev->sys_image_guid == mpi->sys_image_guid) 6909 bound = mlx5_ib_bind_slave_port(dev, mpi); 6910 6911 if (bound) { 6912 rdma_roce_rescan_device(&dev->ib_dev); 6913 break; 6914 } 6915 } 6916 6917 if (!bound) { 6918 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); 6919 dev_dbg(mdev->device, 6920 "no suitable IB device found to bind to, added to unaffiliated list.\n"); 6921 } 6922 mutex_unlock(&mlx5_ib_multiport_mutex); 6923 6924 return mpi; 6925 } 6926 6927 static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 6928 { 6929 const struct mlx5_ib_profile *profile; 6930 enum rdma_link_layer ll; 6931 struct mlx5_ib_dev *dev; 6932 int port_type_cap; 6933 int num_ports; 6934 6935 printk_once(KERN_INFO "%s", mlx5_version); 6936 6937 if (MLX5_ESWITCH_MANAGER(mdev) && 6938 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { 6939 if (!mlx5_core_mp_enabled(mdev)) 6940 mlx5_ib_register_vport_reps(mdev); 6941 return mdev; 6942 } 6943 6944 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 6945 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 6946 6947 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) 6948 return mlx5_ib_add_slave_port(mdev); 6949 6950 num_ports = max(MLX5_CAP_GEN(mdev, num_ports), 6951 MLX5_CAP_GEN(mdev, num_vhca_ports)); 6952 dev = ib_alloc_device(mlx5_ib_dev, ib_dev); 6953 if (!dev) 6954 return NULL; 6955 dev->port = kcalloc(num_ports, sizeof(*dev->port), 6956 GFP_KERNEL); 6957 if (!dev->port) { 6958 ib_dealloc_device(&dev->ib_dev); 6959 return NULL; 6960 } 6961 6962 dev->mdev = mdev; 6963 dev->num_ports = num_ports; 6964 6965 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev)) 6966 profile = &raw_eth_profile; 6967 else 6968 profile = &pf_profile; 6969 6970 return __mlx5_ib_add(dev, profile); 6971 } 6972 6973 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 6974 { 6975 struct mlx5_ib_multiport_info *mpi; 6976 struct mlx5_ib_dev *dev; 6977 6978 if (MLX5_ESWITCH_MANAGER(mdev) 
&& context == mdev) { 6979 mlx5_ib_unregister_vport_reps(mdev); 6980 return; 6981 } 6982 6983 if (mlx5_core_is_mp_slave(mdev)) { 6984 mpi = context; 6985 mutex_lock(&mlx5_ib_multiport_mutex); 6986 if (mpi->ibdev) 6987 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); 6988 list_del(&mpi->list); 6989 mutex_unlock(&mlx5_ib_multiport_mutex); 6990 kfree(mpi); 6991 return; 6992 } 6993 6994 dev = context; 6995 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 6996 } 6997 6998 static struct mlx5_interface mlx5_ib_interface = { 6999 .add = mlx5_ib_add, 7000 .remove = mlx5_ib_remove, 7001 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 7002 }; 7003 7004 unsigned long mlx5_ib_get_xlt_emergency_page(void) 7005 { 7006 mutex_lock(&xlt_emergency_page_mutex); 7007 return xlt_emergency_page; 7008 } 7009 7010 void mlx5_ib_put_xlt_emergency_page(void) 7011 { 7012 mutex_unlock(&xlt_emergency_page_mutex); 7013 } 7014 7015 static int __init mlx5_ib_init(void) 7016 { 7017 int err; 7018 7019 xlt_emergency_page = __get_free_page(GFP_KERNEL); 7020 if (!xlt_emergency_page) 7021 return -ENOMEM; 7022 7023 mutex_init(&xlt_emergency_page_mutex); 7024 7025 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0); 7026 if (!mlx5_ib_event_wq) { 7027 free_page(xlt_emergency_page); 7028 return -ENOMEM; 7029 } 7030 7031 mlx5_ib_odp_init(); 7032 7033 err = mlx5_register_interface(&mlx5_ib_interface); 7034 7035 return err; 7036 } 7037 7038 static void __exit mlx5_ib_cleanup(void) 7039 { 7040 mlx5_unregister_interface(&mlx5_ib_interface); 7041 destroy_workqueue(mlx5_ib_event_wq); 7042 mutex_destroy(&xlt_emergency_page_mutex); 7043 free_page(xlt_emergency_page); 7044 } 7045 7046 module_init(mlx5_ib_init); 7047 module_exit(mlx5_ib_cleanup); 7048
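/*
 * Editor's illustrative sketch (not part of the driver, not compiled):
 * a minimal, self-contained userspace model of the STAGE_CREATE profile
 * pattern that drives __mlx5_ib_add() and __mlx5_ib_remove() above.  All
 * demo_* names below are invented for the example; only the init/cleanup
 * ordering mirrors the driver.
 *
 *	#include <stdio.h>
 *
 *	struct demo_dev { const char *name; };
 *
 *	struct demo_stage {
 *		int  (*init)(struct demo_dev *dev);
 *		void (*cleanup)(struct demo_dev *dev);
 *	};
 *
 *	static int  init_a(struct demo_dev *dev)    { printf("%s: A init\n", dev->name); return 0; }
 *	static void cleanup_a(struct demo_dev *dev) { printf("%s: A cleanup\n", dev->name); }
 *	static int  init_b(struct demo_dev *dev)    { printf("%s: B init fails\n", dev->name); return -1; }
 *
 *	static const struct demo_stage profile[] = {
 *		{ .init = init_a, .cleanup = cleanup_a },
 *		{ .init = init_b },	// no cleanup, like STAGE_CREATE(..., NULL)
 *	};
 *
 *	#define DEMO_STAGE_MAX ((int)(sizeof(profile) / sizeof(profile[0])))
 *
 *	// Mirrors __mlx5_ib_remove(): unwind only the stages that already ran.
 *	static void demo_remove(struct demo_dev *dev, int stage)
 *	{
 *		while (stage) {
 *			stage--;
 *			if (profile[stage].cleanup)
 *				profile[stage].cleanup(dev);
 *		}
 *	}
 *
 *	// Mirrors __mlx5_ib_add(): run stages in order; on failure, unwind
 *	// the ones already initialized and report the error.
 *	static int demo_add(struct demo_dev *dev)
 *	{
 *		int err;
 *		int i;
 *
 *		for (i = 0; i < DEMO_STAGE_MAX; i++) {
 *			if (profile[i].init) {
 *				err = profile[i].init(dev);
 *				if (err) {
 *					demo_remove(dev, i);
 *					return err;
 *				}
 *			}
 *		}
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		struct demo_dev dev = { .name = "demo0" };
 *
 *		// Prints "A init", "B init fails", then "A cleanup".
 *		return demo_add(&dev) ? 1 : 0;
 *	}
 */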