/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct work;
	union {
		struct mlx5_ib_dev *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool is_slave;
	unsigned int event;
	void *param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel modules memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   u8 *port_num)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}
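/*
 * Netdev notifier for RoCE ports: keeps roce->netdev in sync with the
 * underlying net_device and turns carrier/admin transitions into
 * IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR events on the matching IB port.
 */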
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}
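/*
 * Resolve the mlx5_core_dev backing a given IB port. For multiport RoCE
 * slave ports this takes a reference on the affiliated device which must
 * be released with mlx5_ib_put_native_port_mdev(); the master device and
 * IB ports need no refcount.
 */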
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					   u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
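/*
 * Same mapping for the extended PTYS (ptys_extended_ethernet) protocol
 * bitmap, which uses a different encoding than the legacy one above.
 */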
static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 * Use native port in case of reps
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num);
	if (err)
		goto out;
	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	if (gid) {
		gid_type = attr->gid_type;
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
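/*
 * ib_device_ops::query_device. Fills struct ib_device_attr from the HCA
 * capabilities and, when called through the extended uverbs path (uhw),
 * also reports the mlx5-specific capabilities (TSO, RSS, CQE compression,
 * striding RQ, tunnel offloads, ...) in struct mlx5_ib_query_device_resp.
 */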
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;

	resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		    sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	props->max_sgl_rd =
		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
	}

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (mlx5_core_is_vf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(mdev, tag_matching) &&
	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			else
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X = 1 << 0,
	MLX5_IB_WIDTH_2X = 1 << 1,
	MLX5_IB_WIDTH_4X = 1 << 2,
	MLX5_IB_WIDTH_8X = 1 << 3,
	MLX5_IB_WIDTH_12X = 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}

	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0 = 1,
	__IB_MAX_VL_0_1 = 2,
	__IB_MAX_VL_0_3 = 3,
	__IB_MAX_VL_0_7 = 4,
	__IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0 = 1,
	MLX5_VL_HW_0_1 = 2,
	MLX5_VL_HW_0_2 = 3,
	MLX5_VL_HW_0_3 = 4,
	MLX5_VL_HW_0_4 = 5,
	MLX5_VL_HW_0_5 = 6,
	MLX5_VL_HW_0_6 = 7,
	MLX5_VL_HW_0_7 = 8,
	MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid = rep->lid;
	props->lmc = rep->lmc;
	props->sm_lid = rep->sm_lid;
	props->sm_sl = rep->sm_sl;
	props->state = rep->vport_state;
	props->phys_state = rep->port_physical_state;
	props->port_cap_flags = rep->cap_mask1;
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = rep->pkey_violation_counter;
	props->qkey_viol_cntr = rep->qkey_violation_counter;
	props->subnet_timeout = rep->subnet_timeout;
	props->init_type_reply = rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	int ret;

	/* Only link layer == ethernet is valid for representors
	 * and we always use port 1
	 */
	ret = mlx5_query_port_roce(ibdev, port, props);
	if (ret || !props)
		return ret;

	/* We don't support GIDS */
	props->gid_tbl_len = 0;

	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, get the PKey from the master
		 * port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)
		goto out;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		err = -EINVAL;
		goto out;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
						 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* Large page with non 4k uar support might limit the dynamic size */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}
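/*
 * Work out how many bfregs and UAR system pages a new user context needs:
 * the user's request is rounded up to whole system pages and a dynamic
 * bfreg area is reserved on top of the static allocation.
 */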
"y" : "n"); 1594 } 1595 1596 static u16 calc_dynamic_bfregs(int uars_per_sys_page) 1597 { 1598 /* Large page with non 4k uar support might limit the dynamic size */ 1599 if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) 1600 return MLX5_MIN_DYN_BFREGS; 1601 1602 return MLX5_MAX_DYN_BFREGS; 1603 } 1604 1605 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, 1606 struct mlx5_ib_alloc_ucontext_req_v2 *req, 1607 struct mlx5_bfreg_info *bfregi) 1608 { 1609 int uars_per_sys_page; 1610 int bfregs_per_sys_page; 1611 int ref_bfregs = req->total_num_bfregs; 1612 1613 if (req->total_num_bfregs == 0) 1614 return -EINVAL; 1615 1616 BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); 1617 BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); 1618 1619 if (req->total_num_bfregs > MLX5_MAX_BFREGS) 1620 return -ENOMEM; 1621 1622 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); 1623 bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; 1624 /* This holds the required static allocation asked by the user */ 1625 req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); 1626 if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) 1627 return -EINVAL; 1628 1629 bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; 1630 bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page); 1631 bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs; 1632 bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page; 1633 1634 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n", 1635 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", 1636 lib_uar_4k ? 
"yes" : "no", ref_bfregs, 1637 req->total_num_bfregs, bfregi->total_num_bfregs, 1638 bfregi->num_sys_pages); 1639 1640 return 0; 1641 } 1642 1643 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) 1644 { 1645 struct mlx5_bfreg_info *bfregi; 1646 int err; 1647 int i; 1648 1649 bfregi = &context->bfregi; 1650 for (i = 0; i < bfregi->num_static_sys_pages; i++) { 1651 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); 1652 if (err) 1653 goto error; 1654 1655 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]); 1656 } 1657 1658 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++) 1659 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX; 1660 1661 return 0; 1662 1663 error: 1664 for (--i; i >= 0; i--) 1665 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) 1666 mlx5_ib_warn(dev, "failed to free uar %d\n", i); 1667 1668 return err; 1669 } 1670 1671 static void deallocate_uars(struct mlx5_ib_dev *dev, 1672 struct mlx5_ib_ucontext *context) 1673 { 1674 struct mlx5_bfreg_info *bfregi; 1675 int i; 1676 1677 bfregi = &context->bfregi; 1678 for (i = 0; i < bfregi->num_sys_pages; i++) 1679 if (i < bfregi->num_static_sys_pages || 1680 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) 1681 mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); 1682 } 1683 1684 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) 1685 { 1686 int err = 0; 1687 1688 mutex_lock(&dev->lb.mutex); 1689 if (td) 1690 dev->lb.user_td++; 1691 if (qp) 1692 dev->lb.qps++; 1693 1694 if (dev->lb.user_td == 2 || 1695 dev->lb.qps == 1) { 1696 if (!dev->lb.enabled) { 1697 err = mlx5_nic_vport_update_local_lb(dev->mdev, true); 1698 dev->lb.enabled = true; 1699 } 1700 } 1701 1702 mutex_unlock(&dev->lb.mutex); 1703 1704 return err; 1705 } 1706 1707 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) 1708 { 1709 mutex_lock(&dev->lb.mutex); 1710 if (td) 1711 dev->lb.user_td--; 1712 if (qp) 1713 dev->lb.qps--; 1714 1715 if (dev->lb.user_td == 1 && 1716 dev->lb.qps == 0) { 1717 if (dev->lb.enabled) { 1718 mlx5_nic_vport_update_local_lb(dev->mdev, false); 1719 dev->lb.enabled = false; 1720 } 1721 } 1722 1723 mutex_unlock(&dev->lb.mutex); 1724 } 1725 1726 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn, 1727 u16 uid) 1728 { 1729 int err; 1730 1731 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1732 return 0; 1733 1734 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid); 1735 if (err) 1736 return err; 1737 1738 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1739 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && 1740 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 1741 return err; 1742 1743 return mlx5_ib_enable_lb(dev, true, false); 1744 } 1745 1746 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn, 1747 u16 uid) 1748 { 1749 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1750 return; 1751 1752 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid); 1753 1754 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1755 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && 1756 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 1757 return; 1758 1759 mlx5_ib_disable_lb(dev, true, false); 1760 } 1761 1762 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, 1763 struct ib_udata *udata) 1764 { 1765 struct ib_device *ibdev = uctx->device; 1766 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1767 struct mlx5_ib_alloc_ucontext_req_v2 req = {}; 1768 
static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	u32 dump_fill_mkey;
	bool lib_uar_4k;

	if (!dev->ib_active)
		return -EAGAIN;

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return -EINVAL;

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
		return -EOPNOTSUPP;

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (dev->wc_support)
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_uars;
		context->devx_uid = err;
	}

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_devx;

	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
		if (err)
			goto out_mdev;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = dev->num_ports;

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
		if (mdev->clock_info)
			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
		resp.response_length += sizeof(resp.clock_info_versions);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
supports larger pages, let's 1914 * pretend we don't support reading the HCA's core clock. This is also 1915 * forced by mmap function. 1916 */ 1917 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { 1918 if (PAGE_SIZE <= 4096) { 1919 resp.comp_mask |= 1920 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; 1921 resp.hca_core_clock_offset = 1922 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; 1923 } 1924 resp.response_length += sizeof(resp.hca_core_clock_offset); 1925 } 1926 1927 if (field_avail(typeof(resp), log_uar_size, udata->outlen)) 1928 resp.response_length += sizeof(resp.log_uar_size); 1929 1930 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) 1931 resp.response_length += sizeof(resp.num_uars_per_page); 1932 1933 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) { 1934 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; 1935 resp.response_length += sizeof(resp.num_dyn_bfregs); 1936 } 1937 1938 if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) { 1939 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { 1940 resp.dump_fill_mkey = dump_fill_mkey; 1941 resp.comp_mask |= 1942 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY; 1943 } 1944 resp.response_length += sizeof(resp.dump_fill_mkey); 1945 } 1946 1947 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1948 if (err) 1949 goto out_mdev; 1950 1951 bfregi->ver = ver; 1952 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; 1953 context->cqe_version = resp.cqe_version; 1954 context->lib_caps = req.lib_caps; 1955 print_lib_caps(dev, context->lib_caps); 1956 1957 if (dev->lag_active) { 1958 u8 port = mlx5_core_native_port_num(dev->mdev) - 1; 1959 1960 atomic_set(&context->tx_port_affinity, 1961 atomic_add_return( 1962 1, &dev->port[port].roce.tx_port_affinity)); 1963 } 1964 1965 return 0; 1966 1967 out_mdev: 1968 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); 1969 out_devx: 1970 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) 1971 mlx5_ib_devx_destroy(dev, context->devx_uid); 1972 1973 out_uars: 1974 deallocate_uars(dev, context); 1975 1976 out_sys_pages: 1977 kfree(bfregi->sys_pages); 1978 1979 out_count: 1980 kfree(bfregi->count); 1981 1982 out_ctx: 1983 return err; 1984 } 1985 1986 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 1987 { 1988 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1989 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1990 struct mlx5_bfreg_info *bfregi; 1991 1992 bfregi = &context->bfregi; 1993 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); 1994 1995 if (context->devx_uid) 1996 mlx5_ib_devx_destroy(dev, context->devx_uid); 1997 1998 deallocate_uars(dev, context); 1999 kfree(bfregi->sys_pages); 2000 kfree(bfregi->count); 2001 } 2002 2003 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, 2004 int uar_idx) 2005 { 2006 int fw_uars_per_page; 2007 2008 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
MLX5_UARS_IN_PAGE : 1; 2009 2010 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; 2011 } 2012 2013 static int get_command(unsigned long offset) 2014 { 2015 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; 2016 } 2017 2018 static int get_arg(unsigned long offset) 2019 { 2020 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); 2021 } 2022 2023 static int get_index(unsigned long offset) 2024 { 2025 return get_arg(offset); 2026 } 2027 2028 /* Index resides in an extra byte to enable larger values than 255 */ 2029 static int get_extended_index(unsigned long offset) 2030 { 2031 return get_arg(offset) | ((offset >> 16) & 0xff) << 8; 2032 } 2033 2034 2035 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) 2036 { 2037 } 2038 2039 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) 2040 { 2041 switch (cmd) { 2042 case MLX5_IB_MMAP_WC_PAGE: 2043 return "WC"; 2044 case MLX5_IB_MMAP_REGULAR_PAGE: 2045 return "best effort WC"; 2046 case MLX5_IB_MMAP_NC_PAGE: 2047 return "NC"; 2048 case MLX5_IB_MMAP_DEVICE_MEM: 2049 return "Device Memory"; 2050 default: 2051 return NULL; 2052 } 2053 } 2054 2055 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, 2056 struct vm_area_struct *vma, 2057 struct mlx5_ib_ucontext *context) 2058 { 2059 if ((vma->vm_end - vma->vm_start != PAGE_SIZE) || 2060 !(vma->vm_flags & VM_SHARED)) 2061 return -EINVAL; 2062 2063 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1) 2064 return -EOPNOTSUPP; 2065 2066 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 2067 return -EPERM; 2068 vma->vm_flags &= ~VM_MAYWRITE; 2069 2070 if (!dev->mdev->clock_info) 2071 return -EOPNOTSUPP; 2072 2073 return vm_insert_page(vma, vma->vm_start, 2074 virt_to_page(dev->mdev->clock_info)); 2075 } 2076 2077 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) 2078 { 2079 struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); 2080 struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); 2081 struct mlx5_ib_dm *mdm; 2082 2083 switch (mentry->mmap_flag) { 2084 case MLX5_IB_MMAP_TYPE_MEMIC: 2085 mdm = container_of(mentry, struct mlx5_ib_dm, mentry); 2086 mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr, 2087 mdm->size); 2088 kfree(mdm); 2089 break; 2090 default: 2091 WARN_ON(true); 2092 } 2093 } 2094 2095 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, 2096 struct vm_area_struct *vma, 2097 struct mlx5_ib_ucontext *context) 2098 { 2099 struct mlx5_bfreg_info *bfregi = &context->bfregi; 2100 int err; 2101 unsigned long idx; 2102 phys_addr_t pfn; 2103 pgprot_t prot; 2104 u32 bfreg_dyn_idx = 0; 2105 u32 uar_index; 2106 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC); 2107 int max_valid_idx = dyn_uar ? 
bfregi->num_sys_pages : 2108 bfregi->num_static_sys_pages; 2109 2110 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2111 return -EINVAL; 2112 2113 if (dyn_uar) 2114 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; 2115 else 2116 idx = get_index(vma->vm_pgoff); 2117 2118 if (idx >= max_valid_idx) { 2119 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", 2120 idx, max_valid_idx); 2121 return -EINVAL; 2122 } 2123 2124 switch (cmd) { 2125 case MLX5_IB_MMAP_WC_PAGE: 2126 case MLX5_IB_MMAP_ALLOC_WC: 2127 /* Some architectures don't support WC memory */ 2128 #if defined(CONFIG_X86) 2129 if (!pat_enabled()) 2130 return -EPERM; 2131 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 2132 return -EPERM; 2133 #endif 2134 /* fall through */ 2135 case MLX5_IB_MMAP_REGULAR_PAGE: 2136 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 2137 prot = pgprot_writecombine(vma->vm_page_prot); 2138 break; 2139 case MLX5_IB_MMAP_NC_PAGE: 2140 prot = pgprot_noncached(vma->vm_page_prot); 2141 break; 2142 default: 2143 return -EINVAL; 2144 } 2145 2146 if (dyn_uar) { 2147 int uars_per_page; 2148 2149 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); 2150 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); 2151 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { 2152 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", 2153 bfreg_dyn_idx, bfregi->total_num_bfregs); 2154 return -EINVAL; 2155 } 2156 2157 mutex_lock(&bfregi->lock); 2158 /* Fail if uar already allocated, first bfreg index of each 2159 * page holds its count. 2160 */ 2161 if (bfregi->count[bfreg_dyn_idx]) { 2162 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx); 2163 mutex_unlock(&bfregi->lock); 2164 return -EINVAL; 2165 } 2166 2167 bfregi->count[bfreg_dyn_idx]++; 2168 mutex_unlock(&bfregi->lock); 2169 2170 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); 2171 if (err) { 2172 mlx5_ib_warn(dev, "UAR alloc failed\n"); 2173 goto free_bfreg; 2174 } 2175 } else { 2176 uar_index = bfregi->sys_pages[idx]; 2177 } 2178 2179 pfn = uar_index2pfn(dev, uar_index); 2180 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 2181 2182 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE, 2183 prot, NULL); 2184 if (err) { 2185 mlx5_ib_err(dev, 2186 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n", 2187 err, mmap_cmd2str(cmd)); 2188 goto err; 2189 } 2190 2191 if (dyn_uar) 2192 bfregi->sys_pages[idx] = uar_index; 2193 return 0; 2194 2195 err: 2196 if (!dyn_uar) 2197 return err; 2198 2199 mlx5_cmd_free_uar(dev->mdev, idx); 2200 2201 free_bfreg: 2202 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); 2203 2204 return err; 2205 } 2206 2207 static int add_dm_mmap_entry(struct ib_ucontext *context, 2208 struct mlx5_ib_dm *mdm, 2209 u64 address) 2210 { 2211 mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC; 2212 mdm->mentry.address = address; 2213 return rdma_user_mmap_entry_insert_range( 2214 context, &mdm->mentry.rdma_entry, 2215 mdm->size, 2216 MLX5_IB_MMAP_DEVICE_MEM << 16, 2217 (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1); 2218 } 2219 2220 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma) 2221 { 2222 unsigned long idx; 2223 u8 command; 2224 2225 command = get_command(vma->vm_pgoff); 2226 idx = get_extended_index(vma->vm_pgoff); 2227 2228 return (command << 16 | idx); 2229 } 2230 2231 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, 2232 struct vm_area_struct *vma, 2233 struct ib_ucontext *ucontext) 2234 
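/*
 * vm_pgoff cookie layout, as used by the helpers above: the command is
 * carried above MLX5_IB_MMAP_CMD_SHIFT (see get_command()) and the
 * extended index in the remaining bits (see get_extended_index()).
 * mlx5_vma_to_pgoff() folds them back into a "command << 16 | index"
 * key, which is the same key space add_dm_mmap_entry() registers MEMIC
 * entries under (MLX5_IB_MMAP_DEVICE_MEM << 16 ... + 0xffff).
 */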
{ 2235 struct mlx5_user_mmap_entry *mentry; 2236 struct rdma_user_mmap_entry *entry; 2237 unsigned long pgoff; 2238 pgprot_t prot; 2239 phys_addr_t pfn; 2240 int ret; 2241 2242 pgoff = mlx5_vma_to_pgoff(vma); 2243 entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff); 2244 if (!entry) 2245 return -EINVAL; 2246 2247 mentry = to_mmmap(entry); 2248 pfn = (mentry->address >> PAGE_SHIFT); 2249 prot = pgprot_writecombine(vma->vm_page_prot); 2250 ret = rdma_user_mmap_io(ucontext, vma, pfn, 2251 entry->npages * PAGE_SIZE, 2252 prot, 2253 entry); 2254 rdma_user_mmap_entry_put(&mentry->rdma_entry); 2255 return ret; 2256 } 2257 2258 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 2259 { 2260 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 2261 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 2262 unsigned long command; 2263 phys_addr_t pfn; 2264 2265 command = get_command(vma->vm_pgoff); 2266 switch (command) { 2267 case MLX5_IB_MMAP_WC_PAGE: 2268 case MLX5_IB_MMAP_NC_PAGE: 2269 case MLX5_IB_MMAP_REGULAR_PAGE: 2270 case MLX5_IB_MMAP_ALLOC_WC: 2271 return uar_mmap(dev, command, vma, context); 2272 2273 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 2274 return -ENOSYS; 2275 2276 case MLX5_IB_MMAP_CORE_CLOCK: 2277 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2278 return -EINVAL; 2279 2280 if (vma->vm_flags & VM_WRITE) 2281 return -EPERM; 2282 vma->vm_flags &= ~VM_MAYWRITE; 2283 2284 /* Don't expose to user-space information it shouldn't have */ 2285 if (PAGE_SIZE > 4096) 2286 return -EOPNOTSUPP; 2287 2288 pfn = (dev->mdev->iseg_base + 2289 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 2290 PAGE_SHIFT; 2291 return rdma_user_mmap_io(&context->ibucontext, vma, pfn, 2292 PAGE_SIZE, 2293 pgprot_noncached(vma->vm_page_prot), 2294 NULL); 2295 case MLX5_IB_MMAP_CLOCK_INFO: 2296 return mlx5_ib_mmap_clock_info_page(dev, vma, context); 2297 2298 default: 2299 return mlx5_ib_mmap_offset(dev, vma, ibcontext); 2300 } 2301 2302 return 0; 2303 } 2304 2305 static inline int check_dm_type_support(struct mlx5_ib_dev *dev, 2306 u32 type) 2307 { 2308 switch (type) { 2309 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2310 if (!MLX5_CAP_DEV_MEM(dev->mdev, memic)) 2311 return -EOPNOTSUPP; 2312 break; 2313 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2314 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2315 if (!capable(CAP_SYS_RAWIO) || 2316 !capable(CAP_NET_RAW)) 2317 return -EPERM; 2318 2319 if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 2320 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner))) 2321 return -EOPNOTSUPP; 2322 break; 2323 } 2324 2325 return 0; 2326 } 2327 2328 static int handle_alloc_dm_memic(struct ib_ucontext *ctx, 2329 struct mlx5_ib_dm *dm, 2330 struct ib_dm_alloc_attr *attr, 2331 struct uverbs_attr_bundle *attrs) 2332 { 2333 struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; 2334 u64 start_offset; 2335 u16 page_idx; 2336 int err; 2337 u64 address; 2338 2339 dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE); 2340 2341 err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr, 2342 dm->size, attr->alignment); 2343 if (err) 2344 return err; 2345 2346 address = dm->dev_addr & PAGE_MASK; 2347 err = add_dm_mmap_entry(ctx, dm, address); 2348 if (err) 2349 goto err_dealloc; 2350 2351 page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF; 2352 err = uverbs_copy_to(attrs, 2353 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 2354 &page_idx, 2355 sizeof(page_idx)); 2356 if (err) 2357 goto err_copy; 2358 2359 start_offset = dm->dev_addr & ~PAGE_MASK; 2360 err = uverbs_copy_to(attrs, 2361 
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		goto err_copy;

	return 0;

err_copy:
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
err_dealloc:
	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);

	return err;
}

static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
				  struct mlx5_ib_dm *dm,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs,
				  int type)
{
	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
	u64 act_size;
	int err;

	/* Allocation size must be a multiple of the basic block size
	 * and a power of 2.
	 */
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	act_size = roundup_pow_of_two(act_size);

	dm->size = act_size;
	err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
				   &dm->icm_dm.obj_id);
	if (err)
		return err;

	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &dm->dev_addr, sizeof(dm->dev_addr));
	if (err)
		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
				       dm->icm_dm.obj_id);

	return err;
}

struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *dm;
	enum mlx5_ib_uapi_dm_type type;
	int err;

	err = uverbs_get_const_default(&type, attrs,
				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
	if (err)
		return ERR_PTR(err);

	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
		    type, attr->length, attr->alignment);

	err = check_dm_type_support(to_mdev(ibdev), type);
	if (err)
		return ERR_PTR(err);

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->type = type;

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		err = handle_alloc_dm_memic(context, dm,
					    attr,
					    attrs);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_STEERING);
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_free;

	return &dm->ibdm;

err_free:
	kfree(dm);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
	struct mlx5_ib_dm *dm = to_mdm(ibdm);
	int ret;

	switch (dm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
		return 0;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
					     dm->size, ctx->devx_uid, dm->dev_addr,
					     dm->icm_dm.obj_id);
		if (ret)
			return ret;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		ret =
mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY, 2489 dm->size, ctx->devx_uid, dm->dev_addr, 2490 dm->icm_dm.obj_id); 2491 if (ret) 2492 return ret; 2493 break; 2494 default: 2495 return -EOPNOTSUPP; 2496 } 2497 2498 kfree(dm); 2499 2500 return 0; 2501 } 2502 2503 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 2504 { 2505 struct mlx5_ib_pd *pd = to_mpd(ibpd); 2506 struct ib_device *ibdev = ibpd->device; 2507 struct mlx5_ib_alloc_pd_resp resp; 2508 int err; 2509 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; 2510 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; 2511 u16 uid = 0; 2512 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( 2513 udata, struct mlx5_ib_ucontext, ibucontext); 2514 2515 uid = context ? context->devx_uid : 0; 2516 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); 2517 MLX5_SET(alloc_pd_in, in, uid, uid); 2518 err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), 2519 out, sizeof(out)); 2520 if (err) 2521 return err; 2522 2523 pd->pdn = MLX5_GET(alloc_pd_out, out, pd); 2524 pd->uid = uid; 2525 if (udata) { 2526 resp.pdn = pd->pdn; 2527 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 2528 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); 2529 return -EFAULT; 2530 } 2531 } 2532 2533 return 0; 2534 } 2535 2536 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) 2537 { 2538 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 2539 struct mlx5_ib_pd *mpd = to_mpd(pd); 2540 2541 mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); 2542 } 2543 2544 enum { 2545 MATCH_CRITERIA_ENABLE_OUTER_BIT, 2546 MATCH_CRITERIA_ENABLE_MISC_BIT, 2547 MATCH_CRITERIA_ENABLE_INNER_BIT, 2548 MATCH_CRITERIA_ENABLE_MISC2_BIT 2549 }; 2550 2551 #define HEADER_IS_ZERO(match_criteria, headers) \ 2552 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 2553 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 2554 2555 static u8 get_match_criteria_enable(u32 *match_criteria) 2556 { 2557 u8 match_criteria_enable; 2558 2559 match_criteria_enable = 2560 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 2561 MATCH_CRITERIA_ENABLE_OUTER_BIT; 2562 match_criteria_enable |= 2563 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 2564 MATCH_CRITERIA_ENABLE_MISC_BIT; 2565 match_criteria_enable |= 2566 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 2567 MATCH_CRITERIA_ENABLE_INNER_BIT; 2568 match_criteria_enable |= 2569 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 2570 MATCH_CRITERIA_ENABLE_MISC2_BIT; 2571 2572 return match_criteria_enable; 2573 } 2574 2575 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 2576 { 2577 u8 entry_mask; 2578 u8 entry_val; 2579 int err = 0; 2580 2581 if (!mask) 2582 goto out; 2583 2584 entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, 2585 ip_protocol); 2586 entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, 2587 ip_protocol); 2588 if (!entry_mask) { 2589 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 2590 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2591 goto out; 2592 } 2593 /* Don't override existing ip protocol */ 2594 if (mask != entry_mask || val != entry_val) 2595 err = -EINVAL; 2596 out: 2597 return err; 2598 } 2599 2600 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 2601 bool inner) 2602 { 2603 if (inner) { 2604 MLX5_SET(fte_match_set_misc, 2605 misc_c, inner_ipv6_flow_label, mask); 2606 MLX5_SET(fte_match_set_misc, 2607 misc_v, inner_ipv6_flow_label, val); 2608 } else { 2609 
MLX5_SET(fte_match_set_misc, 2610 misc_c, outer_ipv6_flow_label, mask); 2611 MLX5_SET(fte_match_set_misc, 2612 misc_v, outer_ipv6_flow_label, val); 2613 } 2614 } 2615 2616 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 2617 { 2618 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 2619 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 2620 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 2621 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 2622 } 2623 2624 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) 2625 { 2626 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) && 2627 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL)) 2628 return -EOPNOTSUPP; 2629 2630 if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) && 2631 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP)) 2632 return -EOPNOTSUPP; 2633 2634 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) && 2635 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS)) 2636 return -EOPNOTSUPP; 2637 2638 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) && 2639 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL)) 2640 return -EOPNOTSUPP; 2641 2642 return 0; 2643 } 2644 2645 #define LAST_ETH_FIELD vlan_tag 2646 #define LAST_IB_FIELD sl 2647 #define LAST_IPV4_FIELD tos 2648 #define LAST_IPV6_FIELD traffic_class 2649 #define LAST_TCP_UDP_FIELD src_port 2650 #define LAST_TUNNEL_FIELD tunnel_id 2651 #define LAST_FLOW_TAG_FIELD tag_id 2652 #define LAST_DROP_FIELD size 2653 #define LAST_COUNTERS_FIELD counters 2654 2655 /* Field is the last supported field */ 2656 #define FIELDS_NOT_SUPPORTED(filter, field)\ 2657 memchr_inv((void *)&filter.field +\ 2658 sizeof(filter.field), 0,\ 2659 sizeof(filter) -\ 2660 offsetof(typeof(filter), field) -\ 2661 sizeof(filter.field)) 2662 2663 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, 2664 bool is_egress, 2665 struct mlx5_flow_act *action) 2666 { 2667 2668 switch (maction->ib_action.type) { 2669 case IB_FLOW_ACTION_ESP: 2670 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 2671 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) 2672 return -EINVAL; 2673 /* Currently only AES_GCM keymat is supported by the driver */ 2674 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 2675 action->action |= is_egress ? 
2676 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 2677 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 2678 return 0; 2679 case IB_FLOW_ACTION_UNSPECIFIED: 2680 if (maction->flow_action_raw.sub_type == 2681 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { 2682 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2683 return -EINVAL; 2684 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2685 action->modify_hdr = 2686 maction->flow_action_raw.modify_hdr; 2687 return 0; 2688 } 2689 if (maction->flow_action_raw.sub_type == 2690 MLX5_IB_FLOW_ACTION_DECAP) { 2691 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2692 return -EINVAL; 2693 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 2694 return 0; 2695 } 2696 if (maction->flow_action_raw.sub_type == 2697 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) { 2698 if (action->action & 2699 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) 2700 return -EINVAL; 2701 action->action |= 2702 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 2703 action->pkt_reformat = 2704 maction->flow_action_raw.pkt_reformat; 2705 return 0; 2706 } 2707 /* fall through */ 2708 default: 2709 return -EOPNOTSUPP; 2710 } 2711 } 2712 2713 static int parse_flow_attr(struct mlx5_core_dev *mdev, 2714 struct mlx5_flow_spec *spec, 2715 const union ib_flow_spec *ib_spec, 2716 const struct ib_flow_attr *flow_attr, 2717 struct mlx5_flow_act *action, u32 prev_type) 2718 { 2719 struct mlx5_flow_context *flow_context = &spec->flow_context; 2720 u32 *match_c = spec->match_criteria; 2721 u32 *match_v = spec->match_value; 2722 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 2723 misc_parameters); 2724 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 2725 misc_parameters); 2726 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 2727 misc_parameters_2); 2728 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 2729 misc_parameters_2); 2730 void *headers_c; 2731 void *headers_v; 2732 int match_ipv; 2733 int ret; 2734 2735 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2736 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2737 inner_headers); 2738 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2739 inner_headers); 2740 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2741 ft_field_support.inner_ip_version); 2742 } else { 2743 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2744 outer_headers); 2745 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2746 outer_headers); 2747 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2748 ft_field_support.outer_ip_version); 2749 } 2750 2751 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2752 case IB_FLOW_SPEC_ETH: 2753 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 2754 return -EOPNOTSUPP; 2755 2756 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2757 dmac_47_16), 2758 ib_spec->eth.mask.dst_mac); 2759 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2760 dmac_47_16), 2761 ib_spec->eth.val.dst_mac); 2762 2763 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2764 smac_47_16), 2765 ib_spec->eth.mask.src_mac); 2766 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2767 smac_47_16), 2768 ib_spec->eth.val.src_mac); 2769 2770 if (ib_spec->eth.mask.vlan_tag) { 2771 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2772 cvlan_tag, 1); 2773 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2774 cvlan_tag, 1); 2775 2776 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2777 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 2778 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2779 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 2780 2781 
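			/*
			 * 802.1Q TCI layout is PCP(3) | CFI/DEI(1) | VID(12):
			 * first_vid above takes the low 12 bits, while the
			 * shifts below extract CFI (>> 12) and PCP (>> 13).
			 */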
MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2782 first_cfi, 2783 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 2784 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2785 first_cfi, 2786 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 2787 2788 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2789 first_prio, 2790 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 2791 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2792 first_prio, 2793 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 2794 } 2795 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2796 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 2797 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2798 ethertype, ntohs(ib_spec->eth.val.ether_type)); 2799 break; 2800 case IB_FLOW_SPEC_IPV4: 2801 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 2802 return -EOPNOTSUPP; 2803 2804 if (match_ipv) { 2805 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2806 ip_version, 0xf); 2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2808 ip_version, MLX5_FS_IPV4_VERSION); 2809 } else { 2810 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2811 ethertype, 0xffff); 2812 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2813 ethertype, ETH_P_IP); 2814 } 2815 2816 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2817 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2818 &ib_spec->ipv4.mask.src_ip, 2819 sizeof(ib_spec->ipv4.mask.src_ip)); 2820 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2821 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2822 &ib_spec->ipv4.val.src_ip, 2823 sizeof(ib_spec->ipv4.val.src_ip)); 2824 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2825 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2826 &ib_spec->ipv4.mask.dst_ip, 2827 sizeof(ib_spec->ipv4.mask.dst_ip)); 2828 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2829 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2830 &ib_spec->ipv4.val.dst_ip, 2831 sizeof(ib_spec->ipv4.val.dst_ip)); 2832 2833 set_tos(headers_c, headers_v, 2834 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 2835 2836 if (set_proto(headers_c, headers_v, 2837 ib_spec->ipv4.mask.proto, 2838 ib_spec->ipv4.val.proto)) 2839 return -EINVAL; 2840 break; 2841 case IB_FLOW_SPEC_IPV6: 2842 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 2843 return -EOPNOTSUPP; 2844 2845 if (match_ipv) { 2846 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2847 ip_version, 0xf); 2848 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2849 ip_version, MLX5_FS_IPV6_VERSION); 2850 } else { 2851 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2852 ethertype, 0xffff); 2853 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2854 ethertype, ETH_P_IPV6); 2855 } 2856 2857 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2858 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2859 &ib_spec->ipv6.mask.src_ip, 2860 sizeof(ib_spec->ipv6.mask.src_ip)); 2861 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2862 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2863 &ib_spec->ipv6.val.src_ip, 2864 sizeof(ib_spec->ipv6.val.src_ip)); 2865 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2866 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2867 &ib_spec->ipv6.mask.dst_ip, 2868 sizeof(ib_spec->ipv6.mask.dst_ip)); 2869 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2870 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2871 &ib_spec->ipv6.val.dst_ip, 2872 sizeof(ib_spec->ipv6.val.dst_ip)); 2873 2874 set_tos(headers_c, headers_v, 2875 ib_spec->ipv6.mask.traffic_class, 2876 ib_spec->ipv6.val.traffic_class); 2877 2878 if (set_proto(headers_c, headers_v, 2879 ib_spec->ipv6.mask.next_hdr, 2880 ib_spec->ipv6.val.next_hdr)) 2881 return -EINVAL; 2882 2883 set_flow_label(misc_params_c, 
misc_params_v, 2884 ntohl(ib_spec->ipv6.mask.flow_label), 2885 ntohl(ib_spec->ipv6.val.flow_label), 2886 ib_spec->type & IB_FLOW_SPEC_INNER); 2887 break; 2888 case IB_FLOW_SPEC_ESP: 2889 if (ib_spec->esp.mask.seq) 2890 return -EOPNOTSUPP; 2891 2892 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 2893 ntohl(ib_spec->esp.mask.spi)); 2894 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 2895 ntohl(ib_spec->esp.val.spi)); 2896 break; 2897 case IB_FLOW_SPEC_TCP: 2898 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2899 LAST_TCP_UDP_FIELD)) 2900 return -EOPNOTSUPP; 2901 2902 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) 2903 return -EINVAL; 2904 2905 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 2906 ntohs(ib_spec->tcp_udp.mask.src_port)); 2907 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 2908 ntohs(ib_spec->tcp_udp.val.src_port)); 2909 2910 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 2911 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2912 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 2913 ntohs(ib_spec->tcp_udp.val.dst_port)); 2914 break; 2915 case IB_FLOW_SPEC_UDP: 2916 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2917 LAST_TCP_UDP_FIELD)) 2918 return -EOPNOTSUPP; 2919 2920 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) 2921 return -EINVAL; 2922 2923 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 2924 ntohs(ib_spec->tcp_udp.mask.src_port)); 2925 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 2926 ntohs(ib_spec->tcp_udp.val.src_port)); 2927 2928 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 2929 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2930 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 2931 ntohs(ib_spec->tcp_udp.val.dst_port)); 2932 break; 2933 case IB_FLOW_SPEC_GRE: 2934 if (ib_spec->gre.mask.c_ks_res0_ver) 2935 return -EOPNOTSUPP; 2936 2937 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) 2938 return -EINVAL; 2939 2940 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2941 0xff); 2942 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2943 IPPROTO_GRE); 2944 2945 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 2946 ntohs(ib_spec->gre.mask.protocol)); 2947 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 2948 ntohs(ib_spec->gre.val.protocol)); 2949 2950 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 2951 gre_key.nvgre.hi), 2952 &ib_spec->gre.mask.key, 2953 sizeof(ib_spec->gre.mask.key)); 2954 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 2955 gre_key.nvgre.hi), 2956 &ib_spec->gre.val.key, 2957 sizeof(ib_spec->gre.val.key)); 2958 break; 2959 case IB_FLOW_SPEC_MPLS: 2960 switch (prev_type) { 2961 case IB_FLOW_SPEC_UDP: 2962 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2963 ft_field_support.outer_first_mpls_over_udp), 2964 &ib_spec->mpls.mask.tag)) 2965 return -EOPNOTSUPP; 2966 2967 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2968 outer_first_mpls_over_udp), 2969 &ib_spec->mpls.val.tag, 2970 sizeof(ib_spec->mpls.val.tag)); 2971 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2972 outer_first_mpls_over_udp), 2973 &ib_spec->mpls.mask.tag, 2974 sizeof(ib_spec->mpls.mask.tag)); 2975 break; 2976 case IB_FLOW_SPEC_GRE: 2977 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2978 ft_field_support.outer_first_mpls_over_gre), 2979 &ib_spec->mpls.mask.tag)) 2980 return -EOPNOTSUPP; 2981 2982 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2983 outer_first_mpls_over_gre), 2984 &ib_spec->mpls.val.tag, 
2985 sizeof(ib_spec->mpls.val.tag)); 2986 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2987 outer_first_mpls_over_gre), 2988 &ib_spec->mpls.mask.tag, 2989 sizeof(ib_spec->mpls.mask.tag)); 2990 break; 2991 default: 2992 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2993 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2994 ft_field_support.inner_first_mpls), 2995 &ib_spec->mpls.mask.tag)) 2996 return -EOPNOTSUPP; 2997 2998 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2999 inner_first_mpls), 3000 &ib_spec->mpls.val.tag, 3001 sizeof(ib_spec->mpls.val.tag)); 3002 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 3003 inner_first_mpls), 3004 &ib_spec->mpls.mask.tag, 3005 sizeof(ib_spec->mpls.mask.tag)); 3006 } else { 3007 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 3008 ft_field_support.outer_first_mpls), 3009 &ib_spec->mpls.mask.tag)) 3010 return -EOPNOTSUPP; 3011 3012 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 3013 outer_first_mpls), 3014 &ib_spec->mpls.val.tag, 3015 sizeof(ib_spec->mpls.val.tag)); 3016 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 3017 outer_first_mpls), 3018 &ib_spec->mpls.mask.tag, 3019 sizeof(ib_spec->mpls.mask.tag)); 3020 } 3021 } 3022 break; 3023 case IB_FLOW_SPEC_VXLAN_TUNNEL: 3024 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 3025 LAST_TUNNEL_FIELD)) 3026 return -EOPNOTSUPP; 3027 3028 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 3029 ntohl(ib_spec->tunnel.mask.tunnel_id)); 3030 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 3031 ntohl(ib_spec->tunnel.val.tunnel_id)); 3032 break; 3033 case IB_FLOW_SPEC_ACTION_TAG: 3034 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 3035 LAST_FLOW_TAG_FIELD)) 3036 return -EOPNOTSUPP; 3037 if (ib_spec->flow_tag.tag_id >= BIT(24)) 3038 return -EINVAL; 3039 3040 flow_context->flow_tag = ib_spec->flow_tag.tag_id; 3041 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 3042 break; 3043 case IB_FLOW_SPEC_ACTION_DROP: 3044 if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 3045 LAST_DROP_FIELD)) 3046 return -EOPNOTSUPP; 3047 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 3048 break; 3049 case IB_FLOW_SPEC_ACTION_HANDLE: 3050 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act), 3051 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action); 3052 if (ret) 3053 return ret; 3054 break; 3055 case IB_FLOW_SPEC_ACTION_COUNT: 3056 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 3057 LAST_COUNTERS_FIELD)) 3058 return -EOPNOTSUPP; 3059 3060 /* for now support only one counters spec per flow */ 3061 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 3062 return -EINVAL; 3063 3064 action->counters = ib_spec->flow_count.counters; 3065 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3066 break; 3067 default: 3068 return -EINVAL; 3069 } 3070 3071 return 0; 3072 } 3073 3074 /* If a flow could catch both multicast and unicast packets, 3075 * it won't fall into the multicast flow steering table and this rule 3076 * could steal other multicast packets. 
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

enum valid_spec {
	VALID_SPEC_INVALID,
	VALID_SPEC_VALID,
	VALID_SPEC_NA,
};

static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
		     const struct mlx5_flow_spec *spec,
		     const struct mlx5_flow_act *flow_act,
		     bool egress)
{
	const u32 *match_c = spec->match_criteria;
	bool is_crypto =
		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

	/*
	 * Currently only crypto is supported in egress. Once regular egress
	 * rules are supported, always return VALID_SPEC_NA here.
	 */
	if (!is_crypto)
		return VALID_SPEC_NA;

	return is_crypto && is_ipsec &&
	       (!egress || (!is_drop &&
			    !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
		VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
			  const struct mlx5_flow_spec *spec,
			  const struct mlx5_flow_act *flow_act,
			  bool egress)
{
	/* We currently only support IPsec egress flows */
	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ?
IB_FLOW_SPEC_INNER : 0; 3160 bool ipv4_spec_valid, ipv6_spec_valid; 3161 unsigned int ip_spec_type = 0; 3162 bool has_ethertype = false; 3163 unsigned int spec_index; 3164 bool mask_valid = true; 3165 u16 eth_type = 0; 3166 bool type_valid; 3167 3168 /* Validate that ethertype is correct */ 3169 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3170 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 3171 ib_spec->eth.mask.ether_type) { 3172 mask_valid = (ib_spec->eth.mask.ether_type == 3173 htons(0xffff)); 3174 has_ethertype = true; 3175 eth_type = ntohs(ib_spec->eth.val.ether_type); 3176 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 3177 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 3178 ip_spec_type = ib_spec->type; 3179 } 3180 ib_spec = (void *)ib_spec + ib_spec->size; 3181 } 3182 3183 type_valid = (!has_ethertype) || (!ip_spec_type); 3184 if (!type_valid && mask_valid) { 3185 ipv4_spec_valid = (eth_type == ETH_P_IP) && 3186 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 3187 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 3188 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 3189 3190 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 3191 (((eth_type == ETH_P_MPLS_UC) || 3192 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 3193 } 3194 3195 return type_valid; 3196 } 3197 3198 static bool is_valid_attr(struct mlx5_core_dev *mdev, 3199 const struct ib_flow_attr *flow_attr) 3200 { 3201 return is_valid_ethertype(mdev, flow_attr, false) && 3202 is_valid_ethertype(mdev, flow_attr, true); 3203 } 3204 3205 static void put_flow_table(struct mlx5_ib_dev *dev, 3206 struct mlx5_ib_flow_prio *prio, bool ft_added) 3207 { 3208 prio->refcount -= !!ft_added; 3209 if (!prio->refcount) { 3210 mlx5_destroy_flow_table(prio->flow_table); 3211 prio->flow_table = NULL; 3212 } 3213 } 3214 3215 static void counters_clear_description(struct ib_counters *counters) 3216 { 3217 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3218 3219 mutex_lock(&mcounters->mcntrs_mutex); 3220 kfree(mcounters->counters_data); 3221 mcounters->counters_data = NULL; 3222 mcounters->cntrs_max_index = 0; 3223 mutex_unlock(&mcounters->mcntrs_mutex); 3224 } 3225 3226 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 3227 { 3228 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 3229 struct mlx5_ib_flow_handler, 3230 ibflow); 3231 struct mlx5_ib_flow_handler *iter, *tmp; 3232 struct mlx5_ib_dev *dev = handler->dev; 3233 3234 mutex_lock(&dev->flow_db->lock); 3235 3236 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 3237 mlx5_del_flow_rules(iter->rule); 3238 put_flow_table(dev, iter->prio, true); 3239 list_del(&iter->list); 3240 kfree(iter); 3241 } 3242 3243 mlx5_del_flow_rules(handler->rule); 3244 put_flow_table(dev, handler->prio, true); 3245 if (handler->ibcounters && 3246 atomic_read(&handler->ibcounters->usecnt) == 1) 3247 counters_clear_description(handler->ibcounters); 3248 3249 mutex_unlock(&dev->flow_db->lock); 3250 if (handler->flow_matcher) 3251 atomic_dec(&handler->flow_matcher->usecnt); 3252 kfree(handler); 3253 3254 return 0; 3255 } 3256 3257 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 3258 { 3259 priority *= 2; 3260 if (!dont_trap) 3261 priority++; 3262 return priority; 3263 } 3264 3265 enum flow_table_type { 3266 MLX5_IB_FT_RX, 3267 MLX5_IB_FT_TX 3268 }; 3269 3270 #define MLX5_FS_MAX_TYPES 6 3271 #define MLX5_FS_MAX_ENTRIES BIT(16) 3272 3273 static struct mlx5_ib_flow_prio *_get_prio(struct 
mlx5_flow_namespace *ns, 3274 struct mlx5_ib_flow_prio *prio, 3275 int priority, 3276 int num_entries, int num_groups, 3277 u32 flags) 3278 { 3279 struct mlx5_flow_table *ft; 3280 3281 ft = mlx5_create_auto_grouped_flow_table(ns, priority, 3282 num_entries, 3283 num_groups, 3284 0, flags); 3285 if (IS_ERR(ft)) 3286 return ERR_CAST(ft); 3287 3288 prio->flow_table = ft; 3289 prio->refcount = 0; 3290 return prio; 3291 } 3292 3293 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 3294 struct ib_flow_attr *flow_attr, 3295 enum flow_table_type ft_type) 3296 { 3297 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 3298 struct mlx5_flow_namespace *ns = NULL; 3299 struct mlx5_ib_flow_prio *prio; 3300 struct mlx5_flow_table *ft; 3301 int max_table_size; 3302 int num_entries; 3303 int num_groups; 3304 bool esw_encap; 3305 u32 flags = 0; 3306 int priority; 3307 3308 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3309 log_max_ft_size)); 3310 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 3311 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 3312 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3313 enum mlx5_flow_namespace_type fn_type; 3314 3315 if (flow_is_multicast_only(flow_attr) && 3316 !dont_trap) 3317 priority = MLX5_IB_FLOW_MCAST_PRIO; 3318 else 3319 priority = ib_prio_to_core_prio(flow_attr->priority, 3320 dont_trap); 3321 if (ft_type == MLX5_IB_FT_RX) { 3322 fn_type = MLX5_FLOW_NAMESPACE_BYPASS; 3323 prio = &dev->flow_db->prios[priority]; 3324 if (!dev->is_rep && !esw_encap && 3325 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) 3326 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3327 if (!dev->is_rep && !esw_encap && 3328 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3329 reformat_l3_tunnel_to_l2)) 3330 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3331 } else { 3332 max_table_size = 3333 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, 3334 log_max_ft_size)); 3335 fn_type = MLX5_FLOW_NAMESPACE_EGRESS; 3336 prio = &dev->flow_db->egress_prios[priority]; 3337 if (!dev->is_rep && !esw_encap && 3338 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) 3339 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3340 } 3341 ns = mlx5_get_flow_namespace(dev->mdev, fn_type); 3342 num_entries = MLX5_FS_MAX_ENTRIES; 3343 num_groups = MLX5_FS_MAX_TYPES; 3344 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3345 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3346 ns = mlx5_get_flow_namespace(dev->mdev, 3347 MLX5_FLOW_NAMESPACE_LEFTOVERS); 3348 build_leftovers_ft_param(&priority, 3349 &num_entries, 3350 &num_groups); 3351 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 3352 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3353 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 3354 allow_sniffer_and_nic_rx_shared_tir)) 3355 return ERR_PTR(-ENOTSUPP); 3356 3357 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 
					MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db->sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	max_table_size = min_t(int, num_entries, max_table_size);

	ft = prio->flow_table;
	if (!ft)
		return _get_prio(ns, prio, priority, max_table_size, num_groups,
				 flags);

	return prio;
}

static void set_underlay_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_flow_spec *spec,
			    u32 underlay_qpn)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
					   spec->match_criteria,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);

	if (underlay_qpn &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
				      ft_field_support.bth_dst_qp)) {
		MLX5_SET(fte_match_set_misc,
			 misc_params_v, bth_dst_qp, underlay_qpn);
		MLX5_SET(fte_match_set_misc,
			 misc_params_c, bth_dst_qp, 0xffffff);
	}
}

static int read_flow_counters(struct ib_device *ibdev,
			      struct mlx5_read_counters_attr *read_attr)
{
	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_fc_query(dev->mdev, fc,
			     &read_attr->out[IB_COUNTER_PACKETS],
			     &read_attr->out[IB_COUNTER_BYTES]);
}

/* flow counters currently expose two counters: packets and bytes */
#define FLOW_COUNTERS_NUM 2
static int counters_set_description(struct ib_counters *counters,
				    enum mlx5_ib_counters_type counters_type,
				    struct mlx5_ib_flow_counters_desc *desc_data,
				    u32 ncounters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	u32 cntrs_max_index = 0;
	int i;

	if (counters_type != MLX5_IB_COUNTERS_FLOW)
		return -EINVAL;

	/* init the fields for the object */
	mcounters->type = counters_type;
	mcounters->read_counters = read_flow_counters;
	mcounters->counters_num = FLOW_COUNTERS_NUM;
	mcounters->ncounters = ncounters;
	/* each counter entry has a description and index pair */
	for (i = 0; i < ncounters; i++) {
		if (desc_data[i].description > IB_COUNTER_BYTES)
			return -EINVAL;

		if (cntrs_max_index <= desc_data[i].index)
			cntrs_max_index = desc_data[i].index + 1;
	}

	mutex_lock(&mcounters->mcntrs_mutex);
	mcounters->counters_data = desc_data;
	mcounters->cntrs_max_index = cntrs_max_index;
	mutex_unlock(&mcounters->mcntrs_mutex);

	return 0;
}

#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
static int flow_counters_set_data(struct ib_counters *ibcounters,
				  struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
	bool hw_hndl = false;
	int ret = 0;

	if (ucmd && ucmd->ncounters_data != 0) {
		cntrs_data = ucmd->data;
		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
			return -EINVAL;

		desc_data = kcalloc(cntrs_data->ncounters,
				    sizeof(*desc_data),
				    GFP_KERNEL);
		if (!desc_data)
			return -ENOMEM;

		if (copy_from_user(desc_data,
				   u64_to_user_ptr(cntrs_data->counters_data),
				   sizeof(*desc_data) * cntrs_data->ncounters)) {
			ret = -EFAULT;
			goto free;
		}
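		/*
		 * desc_data now holds the user supplied description/index
		 * pairs; the HW flow counter object itself is created
		 * lazily below, on the first flow that binds this counters
		 * object.
		 */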
3474 } 3475 3476 if (!mcounters->hw_cntrs_hndl) { 3477 mcounters->hw_cntrs_hndl = mlx5_fc_create( 3478 to_mdev(ibcounters->device)->mdev, false); 3479 if (IS_ERR(mcounters->hw_cntrs_hndl)) { 3480 ret = PTR_ERR(mcounters->hw_cntrs_hndl); 3481 goto free; 3482 } 3483 hw_hndl = true; 3484 } 3485 3486 if (desc_data) { 3487 /* counters already bound to at least one flow */ 3488 if (mcounters->cntrs_max_index) { 3489 ret = -EINVAL; 3490 goto free_hndl; 3491 } 3492 3493 ret = counters_set_description(ibcounters, 3494 MLX5_IB_COUNTERS_FLOW, 3495 desc_data, 3496 cntrs_data->ncounters); 3497 if (ret) 3498 goto free_hndl; 3499 3500 } else if (!mcounters->cntrs_max_index) { 3501 /* counters not bound yet, must have udata passed */ 3502 ret = -EINVAL; 3503 goto free_hndl; 3504 } 3505 3506 return 0; 3507 3508 free_hndl: 3509 if (hw_hndl) { 3510 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev, 3511 mcounters->hw_cntrs_hndl); 3512 mcounters->hw_cntrs_hndl = NULL; 3513 } 3514 free: 3515 kfree(desc_data); 3516 return ret; 3517 } 3518 3519 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, 3520 struct mlx5_flow_spec *spec, 3521 struct mlx5_eswitch_rep *rep) 3522 { 3523 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 3524 void *misc; 3525 3526 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 3527 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3528 misc_parameters_2); 3529 3530 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 3531 mlx5_eswitch_get_vport_metadata_for_match(esw, 3532 rep->vport)); 3533 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3534 misc_parameters_2); 3535 3536 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 3537 } else { 3538 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3539 misc_parameters); 3540 3541 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); 3542 3543 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3544 misc_parameters); 3545 3546 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 3547 } 3548 } 3549 3550 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 3551 struct mlx5_ib_flow_prio *ft_prio, 3552 const struct ib_flow_attr *flow_attr, 3553 struct mlx5_flow_destination *dst, 3554 u32 underlay_qpn, 3555 struct mlx5_ib_create_flow *ucmd) 3556 { 3557 struct mlx5_flow_table *ft = ft_prio->flow_table; 3558 struct mlx5_ib_flow_handler *handler; 3559 struct mlx5_flow_act flow_act = {}; 3560 struct mlx5_flow_spec *spec; 3561 struct mlx5_flow_destination dest_arr[2] = {}; 3562 struct mlx5_flow_destination *rule_dst = dest_arr; 3563 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 3564 unsigned int spec_index; 3565 u32 prev_type = 0; 3566 int err = 0; 3567 int dest_num = 0; 3568 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3569 3570 if (!is_valid_attr(dev->mdev, flow_attr)) 3571 return ERR_PTR(-EINVAL); 3572 3573 if (dev->is_rep && is_egress) 3574 return ERR_PTR(-EINVAL); 3575 3576 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3577 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 3578 if (!handler || !spec) { 3579 err = -ENOMEM; 3580 goto free; 3581 } 3582 3583 INIT_LIST_HEAD(&handler->list); 3584 3585 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3586 err = parse_flow_attr(dev->mdev, spec, 3587 ib_flow, flow_attr, &flow_act, 3588 prev_type); 3589 if (err < 0) 3590 goto free; 3591 3592 prev_type = ((union ib_flow_spec *)ib_flow)->type; 3593 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 3594 } 
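	/*
	 * Destination assembly: dest_arr[0] holds the forwarding
	 * destination (TIR or port) unless the rule drops, and a counter
	 * destination may be appended when ACTION_COUNT is set; dest_num
	 * is what ultimately gets passed to mlx5_add_flow_rules().
	 */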
3595 3596 if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { 3597 memcpy(&dest_arr[0], dst, sizeof(*dst)); 3598 dest_num++; 3599 } 3600 3601 if (!flow_is_multicast_only(flow_attr)) 3602 set_underlay_qp(dev, spec, underlay_qpn); 3603 3604 if (dev->is_rep) { 3605 struct mlx5_eswitch_rep *rep; 3606 3607 rep = dev->port[flow_attr->port - 1].rep; 3608 if (!rep) { 3609 err = -EINVAL; 3610 goto free; 3611 } 3612 3613 mlx5_ib_set_rule_source_port(dev, spec, rep); 3614 } 3615 3616 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 3617 3618 if (is_egress && 3619 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 3620 err = -EINVAL; 3621 goto free; 3622 } 3623 3624 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 3625 struct mlx5_ib_mcounters *mcounters; 3626 3627 err = flow_counters_set_data(flow_act.counters, ucmd); 3628 if (err) 3629 goto free; 3630 3631 mcounters = to_mcounters(flow_act.counters); 3632 handler->ibcounters = flow_act.counters; 3633 dest_arr[dest_num].type = 3634 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 3635 dest_arr[dest_num].counter_id = 3636 mlx5_fc_id(mcounters->hw_cntrs_hndl); 3637 dest_num++; 3638 } 3639 3640 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3641 if (!dest_num) 3642 rule_dst = NULL; 3643 } else { 3644 if (is_egress) 3645 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3646 else 3647 flow_act.action |= 3648 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 3649 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 3650 } 3651 3652 if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && 3653 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3654 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3655 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 3656 spec->flow_context.flow_tag, flow_attr->type); 3657 err = -EINVAL; 3658 goto free; 3659 } 3660 handler->rule = mlx5_add_flow_rules(ft, spec, 3661 &flow_act, 3662 rule_dst, dest_num); 3663 3664 if (IS_ERR(handler->rule)) { 3665 err = PTR_ERR(handler->rule); 3666 goto free; 3667 } 3668 3669 ft_prio->refcount++; 3670 handler->prio = ft_prio; 3671 handler->dev = dev; 3672 3673 ft_prio->flow_table = ft; 3674 free: 3675 if (err && handler) { 3676 if (handler->ibcounters && 3677 atomic_read(&handler->ibcounters->usecnt) == 1) 3678 counters_clear_description(handler->ibcounters); 3679 kfree(handler); 3680 } 3681 kvfree(spec); 3682 return err ? 
ERR_PTR(err) : handler; 3683 } 3684 3685 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 3686 struct mlx5_ib_flow_prio *ft_prio, 3687 const struct ib_flow_attr *flow_attr, 3688 struct mlx5_flow_destination *dst) 3689 { 3690 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 3691 } 3692 3693 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 3694 struct mlx5_ib_flow_prio *ft_prio, 3695 struct ib_flow_attr *flow_attr, 3696 struct mlx5_flow_destination *dst) 3697 { 3698 struct mlx5_ib_flow_handler *handler_dst = NULL; 3699 struct mlx5_ib_flow_handler *handler = NULL; 3700 3701 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 3702 if (!IS_ERR(handler)) { 3703 handler_dst = create_flow_rule(dev, ft_prio, 3704 flow_attr, dst); 3705 if (IS_ERR(handler_dst)) { 3706 mlx5_del_flow_rules(handler->rule); 3707 ft_prio->refcount--; 3708 kfree(handler); 3709 handler = handler_dst; 3710 } else { 3711 list_add(&handler_dst->list, &handler->list); 3712 } 3713 } 3714 3715 return handler; 3716 } 3717 enum { 3718 LEFTOVERS_MC, 3719 LEFTOVERS_UC, 3720 }; 3721 3722 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 3723 struct mlx5_ib_flow_prio *ft_prio, 3724 struct ib_flow_attr *flow_attr, 3725 struct mlx5_flow_destination *dst) 3726 { 3727 struct mlx5_ib_flow_handler *handler_ucast = NULL; 3728 struct mlx5_ib_flow_handler *handler = NULL; 3729 3730 static struct { 3731 struct ib_flow_attr flow_attr; 3732 struct ib_flow_spec_eth eth_flow; 3733 } leftovers_specs[] = { 3734 [LEFTOVERS_MC] = { 3735 .flow_attr = { 3736 .num_of_specs = 1, 3737 .size = sizeof(leftovers_specs[0]) 3738 }, 3739 .eth_flow = { 3740 .type = IB_FLOW_SPEC_ETH, 3741 .size = sizeof(struct ib_flow_spec_eth), 3742 .mask = {.dst_mac = {0x1} }, 3743 .val = {.dst_mac = {0x1} } 3744 } 3745 }, 3746 [LEFTOVERS_UC] = { 3747 .flow_attr = { 3748 .num_of_specs = 1, 3749 .size = sizeof(leftovers_specs[0]) 3750 }, 3751 .eth_flow = { 3752 .type = IB_FLOW_SPEC_ETH, 3753 .size = sizeof(struct ib_flow_spec_eth), 3754 .mask = {.dst_mac = {0x1} }, 3755 .val = {.dst_mac = {} } 3756 } 3757 } 3758 }; 3759 3760 handler = create_flow_rule(dev, ft_prio, 3761 &leftovers_specs[LEFTOVERS_MC].flow_attr, 3762 dst); 3763 if (!IS_ERR(handler) && 3764 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 3765 handler_ucast = create_flow_rule(dev, ft_prio, 3766 &leftovers_specs[LEFTOVERS_UC].flow_attr, 3767 dst); 3768 if (IS_ERR(handler_ucast)) { 3769 mlx5_del_flow_rules(handler->rule); 3770 ft_prio->refcount--; 3771 kfree(handler); 3772 handler = handler_ucast; 3773 } else { 3774 list_add(&handler_ucast->list, &handler->list); 3775 } 3776 } 3777 3778 return handler; 3779 } 3780 3781 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 3782 struct mlx5_ib_flow_prio *ft_rx, 3783 struct mlx5_ib_flow_prio *ft_tx, 3784 struct mlx5_flow_destination *dst) 3785 { 3786 struct mlx5_ib_flow_handler *handler_rx; 3787 struct mlx5_ib_flow_handler *handler_tx; 3788 int err; 3789 static const struct ib_flow_attr flow_attr = { 3790 .num_of_specs = 0, 3791 .size = sizeof(flow_attr) 3792 }; 3793 3794 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 3795 if (IS_ERR(handler_rx)) { 3796 err = PTR_ERR(handler_rx); 3797 goto err; 3798 } 3799 3800 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 3801 if (IS_ERR(handler_tx)) { 3802 err = PTR_ERR(handler_tx); 3803 goto err_tx; 3804 } 3805 3806 list_add(&handler_tx->list, &handler_rx->list); 3807 
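	/*
	 * The TX handler is chained onto the RX handler's list, so tearing
	 * down the returned flow via mlx5_ib_destroy_flow() removes both
	 * sniffer rules.
	 */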
3808 return handler_rx; 3809 3810 err_tx: 3811 mlx5_del_flow_rules(handler_rx->rule); 3812 ft_rx->refcount--; 3813 kfree(handler_rx); 3814 err: 3815 return ERR_PTR(err); 3816 } 3817 3818 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 3819 struct ib_flow_attr *flow_attr, 3820 int domain, 3821 struct ib_udata *udata) 3822 { 3823 struct mlx5_ib_dev *dev = to_mdev(qp->device); 3824 struct mlx5_ib_qp *mqp = to_mqp(qp); 3825 struct mlx5_ib_flow_handler *handler = NULL; 3826 struct mlx5_flow_destination *dst = NULL; 3827 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 3828 struct mlx5_ib_flow_prio *ft_prio; 3829 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3830 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 3831 size_t min_ucmd_sz, required_ucmd_sz; 3832 int err; 3833 int underlay_qpn; 3834 3835 if (udata && udata->inlen) { 3836 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) + 3837 sizeof(ucmd_hdr.reserved); 3838 if (udata->inlen < min_ucmd_sz) 3839 return ERR_PTR(-EOPNOTSUPP); 3840 3841 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 3842 if (err) 3843 return ERR_PTR(err); 3844 3845 /* currently supports only one counters data */ 3846 if (ucmd_hdr.ncounters_data > 1) 3847 return ERR_PTR(-EINVAL); 3848 3849 required_ucmd_sz = min_ucmd_sz + 3850 sizeof(struct mlx5_ib_flow_counters_data) * 3851 ucmd_hdr.ncounters_data; 3852 if (udata->inlen > required_ucmd_sz && 3853 !ib_is_udata_cleared(udata, required_ucmd_sz, 3854 udata->inlen - required_ucmd_sz)) 3855 return ERR_PTR(-EOPNOTSUPP); 3856 3857 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 3858 if (!ucmd) 3859 return ERR_PTR(-ENOMEM); 3860 3861 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 3862 if (err) 3863 goto free_ucmd; 3864 } 3865 3866 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 3867 err = -ENOMEM; 3868 goto free_ucmd; 3869 } 3870 3871 if (domain != IB_FLOW_DOMAIN_USER || 3872 flow_attr->port > dev->num_ports || 3873 (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | 3874 IB_FLOW_ATTR_FLAGS_EGRESS))) { 3875 err = -EINVAL; 3876 goto free_ucmd; 3877 } 3878 3879 if (is_egress && 3880 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3881 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3882 err = -EINVAL; 3883 goto free_ucmd; 3884 } 3885 3886 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 3887 if (!dst) { 3888 err = -ENOMEM; 3889 goto free_ucmd; 3890 } 3891 3892 mutex_lock(&dev->flow_db->lock); 3893 3894 ft_prio = get_flow_table(dev, flow_attr, 3895 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 3896 if (IS_ERR(ft_prio)) { 3897 err = PTR_ERR(ft_prio); 3898 goto unlock; 3899 } 3900 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3901 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 3902 if (IS_ERR(ft_prio_tx)) { 3903 err = PTR_ERR(ft_prio_tx); 3904 ft_prio_tx = NULL; 3905 goto destroy_ft; 3906 } 3907 } 3908 3909 if (is_egress) { 3910 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 3911 } else { 3912 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 3913 if (mqp->flags & MLX5_IB_QP_RSS) 3914 dst->tir_num = mqp->rss_qp.tirn; 3915 else 3916 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 3917 } 3918 3919 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3920 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 3921 handler = create_dont_trap_rule(dev, ft_prio, 3922 flow_attr, dst); 3923 } else { 3924 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? 
3925 mqp->underlay_qpn : 0; 3926 handler = _create_flow_rule(dev, ft_prio, flow_attr, 3927 dst, underlay_qpn, ucmd); 3928 } 3929 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3930 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3931 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 3932 dst); 3933 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3934 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 3935 } else { 3936 err = -EINVAL; 3937 goto destroy_ft; 3938 } 3939 3940 if (IS_ERR(handler)) { 3941 err = PTR_ERR(handler); 3942 handler = NULL; 3943 goto destroy_ft; 3944 } 3945 3946 mutex_unlock(&dev->flow_db->lock); 3947 kfree(dst); 3948 kfree(ucmd); 3949 3950 return &handler->ibflow; 3951 3952 destroy_ft: 3953 put_flow_table(dev, ft_prio, false); 3954 if (ft_prio_tx) 3955 put_flow_table(dev, ft_prio_tx, false); 3956 unlock: 3957 mutex_unlock(&dev->flow_db->lock); 3958 kfree(dst); 3959 free_ucmd: 3960 kfree(ucmd); 3961 return ERR_PTR(err); 3962 } 3963 3964 static struct mlx5_ib_flow_prio * 3965 _get_flow_table(struct mlx5_ib_dev *dev, 3966 struct mlx5_ib_flow_matcher *fs_matcher, 3967 bool mcast) 3968 { 3969 struct mlx5_flow_namespace *ns = NULL; 3970 struct mlx5_ib_flow_prio *prio = NULL; 3971 int max_table_size = 0; 3972 bool esw_encap; 3973 u32 flags = 0; 3974 int priority; 3975 3976 if (mcast) 3977 priority = MLX5_IB_FLOW_MCAST_PRIO; 3978 else 3979 priority = ib_prio_to_core_prio(fs_matcher->priority, false); 3980 3981 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 3982 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 3983 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 3984 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3985 log_max_ft_size)); 3986 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) 3987 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3988 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3989 reformat_l3_tunnel_to_l2) && 3990 !esw_encap) 3991 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3992 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) { 3993 max_table_size = BIT( 3994 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); 3995 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap) 3996 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3997 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) { 3998 max_table_size = BIT( 3999 MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); 4000 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) 4001 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 4002 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) && 4003 esw_encap) 4004 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 4005 priority = FDB_BYPASS_PATH; 4006 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) { 4007 max_table_size = 4008 BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, 4009 log_max_ft_size)); 4010 priority = fs_matcher->priority; 4011 } 4012 4013 max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); 4014 4015 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); 4016 if (!ns) 4017 return ERR_PTR(-ENOTSUPP); 4018 4019 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) 4020 prio = &dev->flow_db->prios[priority]; 4021 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) 4022 prio = &dev->flow_db->egress_prios[priority]; 4023 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) 4024 prio = &dev->flow_db->fdb; 4025 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) 4026 prio = &dev->flow_db->rdma_rx[priority]; 4027 4028 if 
(!prio) 4029 return ERR_PTR(-EINVAL); 4030 4031 if (prio->flow_table) 4032 return prio; 4033 4034 return _get_prio(ns, prio, priority, max_table_size, 4035 MLX5_FS_MAX_TYPES, flags); 4036 } 4037 4038 static struct mlx5_ib_flow_handler * 4039 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 4040 struct mlx5_ib_flow_prio *ft_prio, 4041 struct mlx5_flow_destination *dst, 4042 struct mlx5_ib_flow_matcher *fs_matcher, 4043 struct mlx5_flow_context *flow_context, 4044 struct mlx5_flow_act *flow_act, 4045 void *cmd_in, int inlen, 4046 int dst_num) 4047 { 4048 struct mlx5_ib_flow_handler *handler; 4049 struct mlx5_flow_spec *spec; 4050 struct mlx5_flow_table *ft = ft_prio->flow_table; 4051 int err = 0; 4052 4053 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 4054 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 4055 if (!handler || !spec) { 4056 err = -ENOMEM; 4057 goto free; 4058 } 4059 4060 INIT_LIST_HEAD(&handler->list); 4061 4062 memcpy(spec->match_value, cmd_in, inlen); 4063 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 4064 fs_matcher->mask_len); 4065 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 4066 spec->flow_context = *flow_context; 4067 4068 handler->rule = mlx5_add_flow_rules(ft, spec, 4069 flow_act, dst, dst_num); 4070 4071 if (IS_ERR(handler->rule)) { 4072 err = PTR_ERR(handler->rule); 4073 goto free; 4074 } 4075 4076 ft_prio->refcount++; 4077 handler->prio = ft_prio; 4078 handler->dev = dev; 4079 ft_prio->flow_table = ft; 4080 4081 free: 4082 if (err) 4083 kfree(handler); 4084 kvfree(spec); 4085 return err ? ERR_PTR(err) : handler; 4086 } 4087 4088 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 4089 void *match_v) 4090 { 4091 void *match_c; 4092 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 4093 void *dmac, *dmac_mask; 4094 void *ipv4, *ipv4_mask; 4095 4096 if (!(fs_matcher->match_criteria_enable & 4097 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 4098 return false; 4099 4100 match_c = fs_matcher->matcher_mask.match_params; 4101 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 4102 outer_headers); 4103 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 4104 outer_headers); 4105 4106 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4107 dmac_47_16); 4108 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4109 dmac_47_16); 4110 4111 if (is_multicast_ether_addr(dmac) && 4112 is_multicast_ether_addr(dmac_mask)) 4113 return true; 4114 4115 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4116 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4117 4118 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4119 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4120 4121 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 4122 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 4123 return true; 4124 4125 return false; 4126 } 4127 4128 struct mlx5_ib_flow_handler * 4129 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 4130 struct mlx5_ib_flow_matcher *fs_matcher, 4131 struct mlx5_flow_context *flow_context, 4132 struct mlx5_flow_act *flow_act, 4133 u32 counter_id, 4134 void *cmd_in, int inlen, int dest_id, 4135 int dest_type) 4136 { 4137 struct mlx5_flow_destination *dst; 4138 struct mlx5_ib_flow_prio *ft_prio; 4139 struct mlx5_ib_flow_handler *handler; 4140 int dst_num = 0; 4141 bool mcast; 4142 int err; 4143 4144 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 4145 return ERR_PTR(-EOPNOTSUPP); 4146 4147 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 4148 return 
ERR_PTR(-ENOMEM); 4149 4150 dst = kcalloc(2, sizeof(*dst), GFP_KERNEL); 4151 if (!dst) 4152 return ERR_PTR(-ENOMEM); 4153 4154 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 4155 mutex_lock(&dev->flow_db->lock); 4156 4157 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 4158 if (IS_ERR(ft_prio)) { 4159 err = PTR_ERR(ft_prio); 4160 goto unlock; 4161 } 4162 4163 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { 4164 dst[dst_num].type = dest_type; 4165 dst[dst_num].tir_num = dest_id; 4166 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4167 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { 4168 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 4169 dst[dst_num].ft_num = dest_id; 4170 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4171 } else { 4172 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT; 4173 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 4174 } 4175 4176 dst_num++; 4177 4178 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 4179 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 4180 dst[dst_num].counter_id = counter_id; 4181 dst_num++; 4182 } 4183 4184 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, 4185 flow_context, flow_act, 4186 cmd_in, inlen, dst_num); 4187 4188 if (IS_ERR(handler)) { 4189 err = PTR_ERR(handler); 4190 goto destroy_ft; 4191 } 4192 4193 mutex_unlock(&dev->flow_db->lock); 4194 atomic_inc(&fs_matcher->usecnt); 4195 handler->flow_matcher = fs_matcher; 4196 4197 kfree(dst); 4198 4199 return handler; 4200 4201 destroy_ft: 4202 put_flow_table(dev, ft_prio, false); 4203 unlock: 4204 mutex_unlock(&dev->flow_db->lock); 4205 kfree(dst); 4206 4207 return ERR_PTR(err); 4208 } 4209 4210 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 4211 { 4212 u32 flags = 0; 4213 4214 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 4215 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 4216 4217 return flags; 4218 } 4219 4220 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 4221 static struct ib_flow_action * 4222 mlx5_ib_create_flow_action_esp(struct ib_device *device, 4223 const struct ib_flow_action_attrs_esp *attr, 4224 struct uverbs_attr_bundle *attrs) 4225 { 4226 struct mlx5_ib_dev *mdev = to_mdev(device); 4227 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 4228 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 4229 struct mlx5_ib_flow_action *action; 4230 u64 action_flags; 4231 u64 flags; 4232 int err = 0; 4233 4234 err = uverbs_get_flags64( 4235 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 4236 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 4237 if (err) 4238 return ERR_PTR(err); 4239 4240 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 4241 4242 /* We current only support a subset of the standard features. Only a 4243 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 4244 * (with overlap). Full offload mode isn't supported. 
4245 */ 4246 if (!attr->keymat || attr->replay || attr->encap || 4247 attr->spi || attr->seq || attr->tfc_pad || 4248 attr->hard_limit_pkts || 4249 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4250 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 4251 return ERR_PTR(-EOPNOTSUPP); 4252 4253 if (attr->keymat->protocol != 4254 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 4255 return ERR_PTR(-EOPNOTSUPP); 4256 4257 aes_gcm = &attr->keymat->keymat.aes_gcm; 4258 4259 if (aes_gcm->icv_len != 16 || 4260 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 4261 return ERR_PTR(-EOPNOTSUPP); 4262 4263 action = kmalloc(sizeof(*action), GFP_KERNEL); 4264 if (!action) 4265 return ERR_PTR(-ENOMEM); 4266 4267 action->esp_aes_gcm.ib_flags = attr->flags; 4268 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 4269 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 4270 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 4271 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 4272 sizeof(accel_attrs.keymat.aes_gcm.salt)); 4273 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 4274 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 4275 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 4276 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 4277 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 4278 4279 accel_attrs.esn = attr->esn; 4280 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 4281 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 4282 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 4283 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4284 4285 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 4286 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 4287 4288 action->esp_aes_gcm.ctx = 4289 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 4290 if (IS_ERR(action->esp_aes_gcm.ctx)) { 4291 err = PTR_ERR(action->esp_aes_gcm.ctx); 4292 goto err_parse; 4293 } 4294 4295 action->esp_aes_gcm.ib_flags = attr->flags; 4296 4297 return &action->ib_action; 4298 4299 err_parse: 4300 kfree(action); 4301 return ERR_PTR(err); 4302 } 4303 4304 static int 4305 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 4306 const struct ib_flow_action_attrs_esp *attr, 4307 struct uverbs_attr_bundle *attrs) 4308 { 4309 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 4310 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 4311 int err = 0; 4312 4313 if (attr->keymat || attr->replay || attr->encap || 4314 attr->spi || attr->seq || attr->tfc_pad || 4315 attr->hard_limit_pkts || 4316 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4317 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 4318 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 4319 return -EOPNOTSUPP; 4320 4321 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 4322 * be modified. 
4323 */ 4324 if (!(maction->esp_aes_gcm.ib_flags & 4325 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 4326 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4327 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 4328 return -EINVAL; 4329 4330 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 4331 sizeof(accel_attrs)); 4332 4333 accel_attrs.esn = attr->esn; 4334 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 4335 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4336 else 4337 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4338 4339 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 4340 &accel_attrs); 4341 if (err) 4342 return err; 4343 4344 maction->esp_aes_gcm.ib_flags &= 4345 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 4346 maction->esp_aes_gcm.ib_flags |= 4347 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 4348 4349 return 0; 4350 } 4351 4352 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) 4353 { 4354 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 4355 4356 switch (action->type) { 4357 case IB_FLOW_ACTION_ESP: 4358 /* 4359 * We only support aes_gcm for now, so we implicitly know this is 4360 * the underlying crypto. 4361 */ 4362 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 4363 break; 4364 case IB_FLOW_ACTION_UNSPECIFIED: 4365 mlx5_ib_destroy_flow_action_raw(maction); 4366 break; 4367 default: 4368 WARN_ON(true); 4369 break; 4370 } 4371 4372 kfree(maction); 4373 return 0; 4374 } 4375 4376 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4377 { 4378 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4379 struct mlx5_ib_qp *mqp = to_mqp(ibqp); 4380 int err; 4381 u16 uid; 4382 4383 uid = ibqp->pd ? 4384 to_mpd(ibqp->pd)->uid : 0; 4385 4386 if (mqp->flags & MLX5_IB_QP_UNDERLAY) { 4387 mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n"); 4388 return -EOPNOTSUPP; 4389 } 4390 4391 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid); 4392 if (err) 4393 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 4394 ibqp->qp_num, gid->raw); 4395 4396 return err; 4397 } 4398 4399 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 4400 { 4401 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4402 int err; 4403 u16 uid; 4404 4405 uid = ibqp->pd ?
4406 to_mpd(ibqp->pd)->uid : 0; 4407 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid); 4408 if (err) 4409 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 4410 ibqp->qp_num, gid->raw); 4411 4412 return err; 4413 } 4414 4415 static int init_node_data(struct mlx5_ib_dev *dev) 4416 { 4417 int err; 4418 4419 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 4420 if (err) 4421 return err; 4422 4423 dev->mdev->rev_id = dev->mdev->pdev->revision; 4424 4425 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 4426 } 4427 4428 static ssize_t fw_pages_show(struct device *device, 4429 struct device_attribute *attr, char *buf) 4430 { 4431 struct mlx5_ib_dev *dev = 4432 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4433 4434 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); 4435 } 4436 static DEVICE_ATTR_RO(fw_pages); 4437 4438 static ssize_t reg_pages_show(struct device *device, 4439 struct device_attribute *attr, char *buf) 4440 { 4441 struct mlx5_ib_dev *dev = 4442 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4443 4444 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 4445 } 4446 static DEVICE_ATTR_RO(reg_pages); 4447 4448 static ssize_t hca_type_show(struct device *device, 4449 struct device_attribute *attr, char *buf) 4450 { 4451 struct mlx5_ib_dev *dev = 4452 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4453 4454 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 4455 } 4456 static DEVICE_ATTR_RO(hca_type); 4457 4458 static ssize_t hw_rev_show(struct device *device, 4459 struct device_attribute *attr, char *buf) 4460 { 4461 struct mlx5_ib_dev *dev = 4462 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4463 4464 return sprintf(buf, "%x\n", dev->mdev->rev_id); 4465 } 4466 static DEVICE_ATTR_RO(hw_rev); 4467 4468 static ssize_t board_id_show(struct device *device, 4469 struct device_attribute *attr, char *buf) 4470 { 4471 struct mlx5_ib_dev *dev = 4472 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4473 4474 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 4475 dev->mdev->board_id); 4476 } 4477 static DEVICE_ATTR_RO(board_id); 4478 4479 static struct attribute *mlx5_class_attributes[] = { 4480 &dev_attr_hw_rev.attr, 4481 &dev_attr_hca_type.attr, 4482 &dev_attr_board_id.attr, 4483 &dev_attr_fw_pages.attr, 4484 &dev_attr_reg_pages.attr, 4485 NULL, 4486 }; 4487 4488 static const struct attribute_group mlx5_attr_group = { 4489 .attrs = mlx5_class_attributes, 4490 }; 4491 4492 static void pkey_change_handler(struct work_struct *work) 4493 { 4494 struct mlx5_ib_port_resources *ports = 4495 container_of(work, struct mlx5_ib_port_resources, 4496 pkey_change_work); 4497 4498 mutex_lock(&ports->devr->mutex); 4499 mlx5_ib_gsi_pkey_change(ports->gsi); 4500 mutex_unlock(&ports->devr->mutex); 4501 } 4502 4503 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 4504 { 4505 struct mlx5_ib_qp *mqp; 4506 struct mlx5_ib_cq *send_mcq, *recv_mcq; 4507 struct mlx5_core_cq *mcq; 4508 struct list_head cq_armed_list; 4509 unsigned long flags_qp; 4510 unsigned long flags_cq; 4511 unsigned long flags; 4512 4513 INIT_LIST_HEAD(&cq_armed_list); 4514 4515 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 4516 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 4517 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 4518 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 4519 if (mqp->sq.tail != mqp->sq.head) { 4520 send_mcq = 
to_mcq(mqp->ibqp.send_cq); 4521 spin_lock_irqsave(&send_mcq->lock, flags_cq); 4522 if (send_mcq->mcq.comp && 4523 mqp->ibqp.send_cq->comp_handler) { 4524 if (!send_mcq->mcq.reset_notify_added) { 4525 send_mcq->mcq.reset_notify_added = 1; 4526 list_add_tail(&send_mcq->mcq.reset_notify, 4527 &cq_armed_list); 4528 } 4529 } 4530 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 4531 } 4532 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 4533 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 4534 /* no handling is needed for SRQ */ 4535 if (!mqp->ibqp.srq) { 4536 if (mqp->rq.tail != mqp->rq.head) { 4537 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 4538 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 4539 if (recv_mcq->mcq.comp && 4540 mqp->ibqp.recv_cq->comp_handler) { 4541 if (!recv_mcq->mcq.reset_notify_added) { 4542 recv_mcq->mcq.reset_notify_added = 1; 4543 list_add_tail(&recv_mcq->mcq.reset_notify, 4544 &cq_armed_list); 4545 } 4546 } 4547 spin_unlock_irqrestore(&recv_mcq->lock, 4548 flags_cq); 4549 } 4550 } 4551 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 4552 } 4553 /* At this point, all in-flight post_send operations have been flushed by 4554 * the lock/unlock of the locks above. Now arm all involved CQs. 4555 */ 4556 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 4557 mcq->comp(mcq, NULL); 4558 } 4559 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 4560 } 4561 4562 static void delay_drop_handler(struct work_struct *work) 4563 { 4564 int err; 4565 struct mlx5_ib_delay_drop *delay_drop = 4566 container_of(work, struct mlx5_ib_delay_drop, 4567 delay_drop_work); 4568 4569 atomic_inc(&delay_drop->events_cnt); 4570 4571 mutex_lock(&delay_drop->lock); 4572 err = mlx5_core_set_delay_drop(delay_drop->dev->mdev, 4573 delay_drop->timeout); 4574 if (err) { 4575 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n", 4576 delay_drop->timeout); 4577 delay_drop->activate = false; 4578 } 4579 mutex_unlock(&delay_drop->lock); 4580 } 4581 4582 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 4583 struct ib_event *ibev) 4584 { 4585 u8 port = (eqe->data.port.port >> 4) & 0xf; 4586 4587 switch (eqe->sub_type) { 4588 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: 4589 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4590 IB_LINK_LAYER_ETHERNET) 4591 schedule_work(&ibdev->delay_drop.delay_drop_work); 4592 break; 4593 default: /* do nothing */ 4594 return; 4595 } 4596 } 4597 4598 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 4599 struct ib_event *ibev) 4600 { 4601 u8 port = (eqe->data.port.port >> 4) & 0xf; 4602 4603 ibev->element.port_num = port; 4604 4605 switch (eqe->sub_type) { 4606 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: 4607 case MLX5_PORT_CHANGE_SUBTYPE_DOWN: 4608 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: 4609 /* In RoCE, port up/down events are handled in 4610 * mlx5_netdev_event(). 4611 */ 4612 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4613 IB_LINK_LAYER_ETHERNET) 4614 return -EINVAL; 4615 4616 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4617 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 4618 break; 4619 4620 case MLX5_PORT_CHANGE_SUBTYPE_LID: 4621 ibev->event = IB_EVENT_LID_CHANGE; 4622 break; 4623 4624 case MLX5_PORT_CHANGE_SUBTYPE_PKEY: 4625 ibev->event = IB_EVENT_PKEY_CHANGE; 4626 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 4627 break; 4628 4629 case MLX5_PORT_CHANGE_SUBTYPE_GUID: 4630 ibev->event = IB_EVENT_GID_CHANGE; 4631 break; 4632 4633 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: 4634 ibev->event = IB_EVENT_CLIENT_REREGISTER; 4635 break; 4636 default: 4637 return -EINVAL; 4638 } 4639 4640 return 0; 4641 } 4642 4643 static void mlx5_ib_handle_event(struct work_struct *_work) 4644 { 4645 struct mlx5_ib_event_work *work = 4646 container_of(_work, struct mlx5_ib_event_work, work); 4647 struct mlx5_ib_dev *ibdev; 4648 struct ib_event ibev; 4649 bool fatal = false; 4650 4651 if (work->is_slave) { 4652 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); 4653 if (!ibdev) 4654 goto out; 4655 } else { 4656 ibdev = work->dev; 4657 } 4658 4659 switch (work->event) { 4660 case MLX5_DEV_EVENT_SYS_ERROR: 4661 ibev.event = IB_EVENT_DEVICE_FATAL; 4662 mlx5_ib_handle_internal_error(ibdev); 4663 ibev.element.port_num = (u8)(unsigned long)work->param; 4664 fatal = true; 4665 break; 4666 case MLX5_EVENT_TYPE_PORT_CHANGE: 4667 if (handle_port_change(ibdev, work->param, &ibev)) 4668 goto out; 4669 break; 4670 case MLX5_EVENT_TYPE_GENERAL_EVENT: 4671 handle_general_event(ibdev, work->param, &ibev); 4672 /* fall through */ 4673 default: 4674 goto out; 4675 } 4676 4677 ibev.device = &ibdev->ib_dev; 4678 4679 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { 4680 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); 4681 goto out; 4682 } 4683 4684 if (ibdev->ib_active) 4685 ib_dispatch_event(&ibev); 4686 4687 if (fatal) 4688 ibdev->ib_active = false; 4689 out: 4690 kfree(work); 4691 } 4692 4693 static int mlx5_ib_event(struct notifier_block *nb, 4694 unsigned long event, void *param) 4695 { 4696 struct mlx5_ib_event_work *work; 4697 4698 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4699 if (!work) 4700 return NOTIFY_DONE; 4701 4702 INIT_WORK(&work->work, mlx5_ib_handle_event); 4703 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); 4704 work->is_slave = false; 4705 work->param = param; 4706 work->event = event; 4707 4708 queue_work(mlx5_ib_event_wq, &work->work); 4709 4710 return NOTIFY_OK; 4711 } 4712 4713 static int mlx5_ib_event_slave_port(struct notifier_block *nb, 4714 unsigned long event, void *param) 4715 { 4716 struct mlx5_ib_event_work *work; 4717 4718 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4719 if (!work) 4720 return NOTIFY_DONE; 4721 4722 INIT_WORK(&work->work, mlx5_ib_handle_event); 4723 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); 4724 work->is_slave = true; 4725 work->param = param; 4726 work->event = event; 4727 queue_work(mlx5_ib_event_wq, &work->work); 4728 4729 return NOTIFY_OK; 4730 } 4731 4732 static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4733 { 4734 struct mlx5_hca_vport_context vport_ctx; 4735 int err; 4736 int port; 4737 4738 for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) { 4739 dev->mdev->port_caps[port - 1].has_smi = false; 4740 if (MLX5_CAP_GEN(dev->mdev, port_type) == 4741 MLX5_CAP_PORT_TYPE_IB) { 4742 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { 4743 err = mlx5_query_hca_vport_context(dev->mdev, 0, 4744 port, 0, 4745 &vport_ctx); 4746 if (err) { 4747 mlx5_ib_err(dev, "query_hca_vport_context for port=%d 
failed %d\n", 4748 port, err); 4749 return err; 4750 } 4751 dev->mdev->port_caps[port - 1].has_smi = 4752 vport_ctx.has_smi; 4753 } else { 4754 dev->mdev->port_caps[port - 1].has_smi = true; 4755 } 4756 } 4757 } 4758 return 0; 4759 } 4760 4761 static void get_ext_port_caps(struct mlx5_ib_dev *dev) 4762 { 4763 int port; 4764 4765 for (port = 1; port <= dev->num_ports; port++) 4766 mlx5_query_ext_port_caps(dev, port); 4767 } 4768 4769 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4770 { 4771 struct ib_device_attr *dprops = NULL; 4772 struct ib_port_attr *pprops = NULL; 4773 int err = -ENOMEM; 4774 struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 4775 4776 pprops = kzalloc(sizeof(*pprops), GFP_KERNEL); 4777 if (!pprops) 4778 goto out; 4779 4780 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 4781 if (!dprops) 4782 goto out; 4783 4784 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 4785 if (err) { 4786 mlx5_ib_warn(dev, "query_device failed %d\n", err); 4787 goto out; 4788 } 4789 4790 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 4791 if (err) { 4792 mlx5_ib_warn(dev, "query_port %d failed %d\n", 4793 port, err); 4794 goto out; 4795 } 4796 4797 dev->mdev->port_caps[port - 1].pkey_table_len = 4798 dprops->max_pkeys; 4799 dev->mdev->port_caps[port - 1].gid_table_len = 4800 pprops->gid_tbl_len; 4801 mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n", 4802 port, dprops->max_pkeys, pprops->gid_tbl_len); 4803 4804 out: 4805 kfree(pprops); 4806 kfree(dprops); 4807 4808 return err; 4809 } 4810 4811 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4812 { 4813 /* For representors use port 1, as this is the only native 4814 * port 4815 */ 4816 if (dev->is_rep) 4817 return __get_port_caps(dev, 1); 4818 return __get_port_caps(dev, port); 4819 } 4820 4821 static void destroy_umrc_res(struct mlx5_ib_dev *dev) 4822 { 4823 int err; 4824 4825 err = mlx5_mr_cache_cleanup(dev); 4826 if (err) 4827 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 4828 4829 if (dev->umrc.qp) 4830 mlx5_ib_destroy_qp(dev->umrc.qp, NULL); 4831 if (dev->umrc.cq) 4832 ib_free_cq(dev->umrc.cq); 4833 if (dev->umrc.pd) 4834 ib_dealloc_pd(dev->umrc.pd); 4835 } 4836 4837 enum { 4838 MAX_UMR_WR = 128, 4839 }; 4840 4841 static int create_umr_res(struct mlx5_ib_dev *dev) 4842 { 4843 struct ib_qp_init_attr *init_attr = NULL; 4844 struct ib_qp_attr *attr = NULL; 4845 struct ib_pd *pd; 4846 struct ib_cq *cq; 4847 struct ib_qp *qp; 4848 int ret; 4849 4850 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 4851 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 4852 if (!attr || !init_attr) { 4853 ret = -ENOMEM; 4854 goto error_0; 4855 } 4856 4857 pd = ib_alloc_pd(&dev->ib_dev, 0); 4858 if (IS_ERR(pd)) { 4859 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 4860 ret = PTR_ERR(pd); 4861 goto error_0; 4862 } 4863 4864 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 4865 if (IS_ERR(cq)) { 4866 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 4867 ret = PTR_ERR(cq); 4868 goto error_2; 4869 } 4870 4871 init_attr->send_cq = cq; 4872 init_attr->recv_cq = cq; 4873 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 4874 init_attr->cap.max_send_wr = MAX_UMR_WR; 4875 init_attr->cap.max_send_sge = 1; 4876 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 4877 init_attr->port_num = 1; 4878 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 4879 if (IS_ERR(qp)) { 4880 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 4881 ret = PTR_ERR(qp); 4882 goto error_3; 4883 } 4884 qp->device = &dev->ib_dev; 4885
qp->real_qp = qp; 4886 qp->uobject = NULL; 4887 qp->qp_type = MLX5_IB_QPT_REG_UMR; 4888 qp->send_cq = init_attr->send_cq; 4889 qp->recv_cq = init_attr->recv_cq; 4890 4891 attr->qp_state = IB_QPS_INIT; 4892 attr->port_num = 1; 4893 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 4894 IB_QP_PORT, NULL); 4895 if (ret) { 4896 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 4897 goto error_4; 4898 } 4899 4900 memset(attr, 0, sizeof(*attr)); 4901 attr->qp_state = IB_QPS_RTR; 4902 attr->path_mtu = IB_MTU_256; 4903 4904 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4905 if (ret) { 4906 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 4907 goto error_4; 4908 } 4909 4910 memset(attr, 0, sizeof(*attr)); 4911 attr->qp_state = IB_QPS_RTS; 4912 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4913 if (ret) { 4914 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 4915 goto error_4; 4916 } 4917 4918 dev->umrc.qp = qp; 4919 dev->umrc.cq = cq; 4920 dev->umrc.pd = pd; 4921 4922 sema_init(&dev->umrc.sem, MAX_UMR_WR); 4923 ret = mlx5_mr_cache_init(dev); 4924 if (ret) { 4925 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 4926 goto error_4; 4927 } 4928 4929 kfree(attr); 4930 kfree(init_attr); 4931 4932 return 0; 4933 4934 error_4: 4935 mlx5_ib_destroy_qp(qp, NULL); 4936 dev->umrc.qp = NULL; 4937 4938 error_3: 4939 ib_free_cq(cq); 4940 dev->umrc.cq = NULL; 4941 4942 error_2: 4943 ib_dealloc_pd(pd); 4944 dev->umrc.pd = NULL; 4945 4946 error_0: 4947 kfree(attr); 4948 kfree(init_attr); 4949 return ret; 4950 } 4951 4952 static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 4953 { 4954 switch (umr_fence_cap) { 4955 case MLX5_CAP_UMR_FENCE_NONE: 4956 return MLX5_FENCE_MODE_NONE; 4957 case MLX5_CAP_UMR_FENCE_SMALL: 4958 return MLX5_FENCE_MODE_INITIATOR_SMALL; 4959 default: 4960 return MLX5_FENCE_MODE_STRONG_ORDERING; 4961 } 4962 } 4963 4964 static int create_dev_resources(struct mlx5_ib_resources *devr) 4965 { 4966 struct ib_srq_init_attr attr; 4967 struct mlx5_ib_dev *dev; 4968 struct ib_device *ibdev; 4969 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 4970 int port; 4971 int ret = 0; 4972 4973 dev = container_of(devr, struct mlx5_ib_dev, devr); 4974 ibdev = &dev->ib_dev; 4975 4976 mutex_init(&devr->mutex); 4977 4978 devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); 4979 if (!devr->p0) 4980 return -ENOMEM; 4981 4982 devr->p0->device = ibdev; 4983 devr->p0->uobject = NULL; 4984 atomic_set(&devr->p0->usecnt, 0); 4985 4986 ret = mlx5_ib_alloc_pd(devr->p0, NULL); 4987 if (ret) 4988 goto error0; 4989 4990 devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq); 4991 if (!devr->c0) { 4992 ret = -ENOMEM; 4993 goto error1; 4994 } 4995 4996 devr->c0->device = &dev->ib_dev; 4997 atomic_set(&devr->c0->usecnt, 0); 4998 4999 ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL); 5000 if (ret) 5001 goto err_create_cq; 5002 5003 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 5004 if (IS_ERR(devr->x0)) { 5005 ret = PTR_ERR(devr->x0); 5006 goto error2; 5007 } 5008 devr->x0->device = &dev->ib_dev; 5009 devr->x0->inode = NULL; 5010 atomic_set(&devr->x0->usecnt, 0); 5011 mutex_init(&devr->x0->tgt_qp_mutex); 5012 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 5013 5014 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 5015 if (IS_ERR(devr->x1)) { 5016 ret = PTR_ERR(devr->x1); 5017 goto error3; 5018 } 5019 devr->x1->device = &dev->ib_dev; 5020 devr->x1->inode = NULL; 5021 atomic_set(&devr->x1->usecnt, 0); 5022 mutex_init(&devr->x1->tgt_qp_mutex); 5023 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 5024 5025 memset(&attr, 0, 
sizeof(attr)); 5026 attr.attr.max_sge = 1; 5027 attr.attr.max_wr = 1; 5028 attr.srq_type = IB_SRQT_XRC; 5029 attr.ext.cq = devr->c0; 5030 attr.ext.xrc.xrcd = devr->x0; 5031 5032 devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); 5033 if (!devr->s0) { 5034 ret = -ENOMEM; 5035 goto error4; 5036 } 5037 5038 devr->s0->device = &dev->ib_dev; 5039 devr->s0->pd = devr->p0; 5040 devr->s0->srq_type = IB_SRQT_XRC; 5041 devr->s0->ext.xrc.xrcd = devr->x0; 5042 devr->s0->ext.cq = devr->c0; 5043 ret = mlx5_ib_create_srq(devr->s0, &attr, NULL); 5044 if (ret) 5045 goto err_create; 5046 5047 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 5048 atomic_inc(&devr->s0->ext.cq->usecnt); 5049 atomic_inc(&devr->p0->usecnt); 5050 atomic_set(&devr->s0->usecnt, 0); 5051 5052 memset(&attr, 0, sizeof(attr)); 5053 attr.attr.max_sge = 1; 5054 attr.attr.max_wr = 1; 5055 attr.srq_type = IB_SRQT_BASIC; 5056 devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq); 5057 if (!devr->s1) { 5058 ret = -ENOMEM; 5059 goto error5; 5060 } 5061 5062 devr->s1->device = &dev->ib_dev; 5063 devr->s1->pd = devr->p0; 5064 devr->s1->srq_type = IB_SRQT_BASIC; 5065 devr->s1->ext.cq = devr->c0; 5066 5067 ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); 5068 if (ret) 5069 goto error6; 5070 5071 atomic_inc(&devr->p0->usecnt); 5072 atomic_set(&devr->s1->usecnt, 0); 5073 5074 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 5075 INIT_WORK(&devr->ports[port].pkey_change_work, 5076 pkey_change_handler); 5077 devr->ports[port].devr = devr; 5078 } 5079 5080 return 0; 5081 5082 error6: 5083 kfree(devr->s1); 5084 error5: 5085 mlx5_ib_destroy_srq(devr->s0, NULL); 5086 err_create: 5087 kfree(devr->s0); 5088 error4: 5089 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5090 error3: 5091 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5092 error2: 5093 mlx5_ib_destroy_cq(devr->c0, NULL); 5094 err_create_cq: 5095 kfree(devr->c0); 5096 error1: 5097 mlx5_ib_dealloc_pd(devr->p0, NULL); 5098 error0: 5099 kfree(devr->p0); 5100 return ret; 5101 } 5102 5103 static void destroy_dev_resources(struct mlx5_ib_resources *devr) 5104 { 5105 int port; 5106 5107 mlx5_ib_destroy_srq(devr->s1, NULL); 5108 kfree(devr->s1); 5109 mlx5_ib_destroy_srq(devr->s0, NULL); 5110 kfree(devr->s0); 5111 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5112 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5113 mlx5_ib_destroy_cq(devr->c0, NULL); 5114 kfree(devr->c0); 5115 mlx5_ib_dealloc_pd(devr->p0, NULL); 5116 kfree(devr->p0); 5117 5118 /* Make sure no change P_Key work items are still executing */ 5119 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) 5120 cancel_work_sync(&devr->ports[port].pkey_change_work); 5121 } 5122 5123 static u32 get_core_cap_flags(struct ib_device *ibdev, 5124 struct mlx5_hca_vport_context *rep) 5125 { 5126 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5127 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 5128 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 5129 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 5130 bool raw_support = !mlx5_core_mp_enabled(dev->mdev); 5131 u32 ret = 0; 5132 5133 if (rep->grh_required) 5134 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED; 5135 5136 if (ll == IB_LINK_LAYER_INFINIBAND) 5137 return ret | RDMA_CORE_PORT_IBA_IB; 5138 5139 if (raw_support) 5140 ret |= RDMA_CORE_PORT_RAW_PACKET; 5141 5142 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 5143 return ret; 5144 5145 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 5146 return ret; 5147 5148 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 5149 ret |= RDMA_CORE_PORT_IBA_ROCE; 5150 5151 if (roce_version_cap & 
MLX5_ROCE_VERSION_2_CAP) 5152 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 5153 5154 return ret; 5155 } 5156 5157 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 5158 struct ib_port_immutable *immutable) 5159 { 5160 struct ib_port_attr attr; 5161 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5162 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); 5163 struct mlx5_hca_vport_context rep = {0}; 5164 int err; 5165 5166 err = ib_query_port(ibdev, port_num, &attr); 5167 if (err) 5168 return err; 5169 5170 if (ll == IB_LINK_LAYER_INFINIBAND) { 5171 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0, 5172 &rep); 5173 if (err) 5174 return err; 5175 } 5176 5177 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5178 immutable->gid_tbl_len = attr.gid_tbl_len; 5179 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); 5180 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 5181 5182 return 0; 5183 } 5184 5185 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, 5186 struct ib_port_immutable *immutable) 5187 { 5188 struct ib_port_attr attr; 5189 int err; 5190 5191 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5192 5193 err = ib_query_port(ibdev, port_num, &attr); 5194 if (err) 5195 return err; 5196 5197 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5198 immutable->gid_tbl_len = attr.gid_tbl_len; 5199 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5200 5201 return 0; 5202 } 5203 5204 static void get_dev_fw_str(struct ib_device *ibdev, char *str) 5205 { 5206 struct mlx5_ib_dev *dev = 5207 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 5208 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", 5209 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), 5210 fw_rev_sub(dev->mdev)); 5211 } 5212 5213 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) 5214 { 5215 struct mlx5_core_dev *mdev = dev->mdev; 5216 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, 5217 MLX5_FLOW_NAMESPACE_LAG); 5218 struct mlx5_flow_table *ft; 5219 int err; 5220 5221 if (!ns || !mlx5_lag_is_roce(mdev)) 5222 return 0; 5223 5224 err = mlx5_cmd_create_vport_lag(mdev); 5225 if (err) 5226 return err; 5227 5228 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0); 5229 if (IS_ERR(ft)) { 5230 err = PTR_ERR(ft); 5231 goto err_destroy_vport_lag; 5232 } 5233 5234 dev->flow_db->lag_demux_ft = ft; 5235 dev->lag_active = true; 5236 return 0; 5237 5238 err_destroy_vport_lag: 5239 mlx5_cmd_destroy_vport_lag(mdev); 5240 return err; 5241 } 5242 5243 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) 5244 { 5245 struct mlx5_core_dev *mdev = dev->mdev; 5246 5247 if (dev->lag_active) { 5248 dev->lag_active = false; 5249 5250 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); 5251 dev->flow_db->lag_demux_ft = NULL; 5252 5253 mlx5_cmd_destroy_vport_lag(mdev); 5254 } 5255 } 5256 5257 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5258 { 5259 int err; 5260 5261 dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; 5262 err = register_netdevice_notifier(&dev->port[port_num].roce.nb); 5263 if (err) { 5264 dev->port[port_num].roce.nb.notifier_call = NULL; 5265 return err; 5266 } 5267 5268 return 0; 5269 } 5270 5271 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5272 { 5273 if (dev->port[port_num].roce.nb.notifier_call) { 5274 unregister_netdevice_notifier(&dev->port[port_num].roce.nb); 5275 dev->port[port_num].roce.nb.notifier_call = NULL; 5276 } 5277 } 5278 5279 static int mlx5_enable_eth(struct mlx5_ib_dev *dev) 
5280 { 5281 int err; 5282 5283 err = mlx5_nic_vport_enable_roce(dev->mdev); 5284 if (err) 5285 return err; 5286 5287 err = mlx5_eth_lag_init(dev); 5288 if (err) 5289 goto err_disable_roce; 5290 5291 return 0; 5292 5293 err_disable_roce: 5294 mlx5_nic_vport_disable_roce(dev->mdev); 5295 5296 return err; 5297 } 5298 5299 static void mlx5_disable_eth(struct mlx5_ib_dev *dev) 5300 { 5301 mlx5_eth_lag_cleanup(dev); 5302 mlx5_nic_vport_disable_roce(dev->mdev); 5303 } 5304 5305 struct mlx5_ib_counter { 5306 const char *name; 5307 size_t offset; 5308 }; 5309 5310 #define INIT_Q_COUNTER(_name) \ 5311 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)} 5312 5313 static const struct mlx5_ib_counter basic_q_cnts[] = { 5314 INIT_Q_COUNTER(rx_write_requests), 5315 INIT_Q_COUNTER(rx_read_requests), 5316 INIT_Q_COUNTER(rx_atomic_requests), 5317 INIT_Q_COUNTER(out_of_buffer), 5318 }; 5319 5320 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = { 5321 INIT_Q_COUNTER(out_of_sequence), 5322 }; 5323 5324 static const struct mlx5_ib_counter retrans_q_cnts[] = { 5325 INIT_Q_COUNTER(duplicate_request), 5326 INIT_Q_COUNTER(rnr_nak_retry_err), 5327 INIT_Q_COUNTER(packet_seq_err), 5328 INIT_Q_COUNTER(implied_nak_seq_err), 5329 INIT_Q_COUNTER(local_ack_timeout_err), 5330 }; 5331 5332 #define INIT_CONG_COUNTER(_name) \ 5333 { .name = #_name, .offset = \ 5334 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)} 5335 5336 static const struct mlx5_ib_counter cong_cnts[] = { 5337 INIT_CONG_COUNTER(rp_cnp_ignored), 5338 INIT_CONG_COUNTER(rp_cnp_handled), 5339 INIT_CONG_COUNTER(np_ecn_marked_roce_packets), 5340 INIT_CONG_COUNTER(np_cnp_sent), 5341 }; 5342 5343 static const struct mlx5_ib_counter extended_err_cnts[] = { 5344 INIT_Q_COUNTER(resp_local_length_error), 5345 INIT_Q_COUNTER(resp_cqe_error), 5346 INIT_Q_COUNTER(req_cqe_error), 5347 INIT_Q_COUNTER(req_remote_invalid_request), 5348 INIT_Q_COUNTER(req_remote_access_errors), 5349 INIT_Q_COUNTER(resp_remote_access_errors), 5350 INIT_Q_COUNTER(resp_cqe_flush_error), 5351 INIT_Q_COUNTER(req_cqe_flush_error), 5352 }; 5353 5354 #define INIT_EXT_PPCNT_COUNTER(_name) \ 5355 { .name = #_name, .offset = \ 5356 MLX5_BYTE_OFF(ppcnt_reg, \ 5357 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)} 5358 5359 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { 5360 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), 5361 }; 5362 5363 static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) 5364 { 5365 return MLX5_ESWITCH_MANAGER(mdev) && 5366 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == 5367 MLX5_ESWITCH_OFFLOADS; 5368 } 5369 5370 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) 5371 { 5372 int num_cnt_ports; 5373 int i; 5374 5375 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 
1 : dev->num_ports; 5376 5377 for (i = 0; i < num_cnt_ports; i++) { 5378 if (dev->port[i].cnts.set_id_valid) 5379 mlx5_core_dealloc_q_counter(dev->mdev, 5380 dev->port[i].cnts.set_id); 5381 kfree(dev->port[i].cnts.names); 5382 kfree(dev->port[i].cnts.offsets); 5383 } 5384 } 5385 5386 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, 5387 struct mlx5_ib_counters *cnts) 5388 { 5389 u32 num_counters; 5390 5391 num_counters = ARRAY_SIZE(basic_q_cnts); 5392 5393 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) 5394 num_counters += ARRAY_SIZE(out_of_seq_q_cnts); 5395 5396 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) 5397 num_counters += ARRAY_SIZE(retrans_q_cnts); 5398 5399 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) 5400 num_counters += ARRAY_SIZE(extended_err_cnts); 5401 5402 cnts->num_q_counters = num_counters; 5403 5404 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5405 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts); 5406 num_counters += ARRAY_SIZE(cong_cnts); 5407 } 5408 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5409 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts); 5410 num_counters += ARRAY_SIZE(ext_ppcnt_cnts); 5411 } 5412 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL); 5413 if (!cnts->names) 5414 return -ENOMEM; 5415 5416 cnts->offsets = kcalloc(num_counters, 5417 sizeof(cnts->offsets), GFP_KERNEL); 5418 if (!cnts->offsets) 5419 goto err_names; 5420 5421 return 0; 5422 5423 err_names: 5424 kfree(cnts->names); 5425 cnts->names = NULL; 5426 return -ENOMEM; 5427 } 5428 5429 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, 5430 const char **names, 5431 size_t *offsets) 5432 { 5433 int i; 5434 int j = 0; 5435 5436 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { 5437 names[j] = basic_q_cnts[i].name; 5438 offsets[j] = basic_q_cnts[i].offset; 5439 } 5440 5441 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { 5442 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { 5443 names[j] = out_of_seq_q_cnts[i].name; 5444 offsets[j] = out_of_seq_q_cnts[i].offset; 5445 } 5446 } 5447 5448 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 5449 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { 5450 names[j] = retrans_q_cnts[i].name; 5451 offsets[j] = retrans_q_cnts[i].offset; 5452 } 5453 } 5454 5455 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { 5456 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { 5457 names[j] = extended_err_cnts[i].name; 5458 offsets[j] = extended_err_cnts[i].offset; 5459 } 5460 } 5461 5462 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5463 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { 5464 names[j] = cong_cnts[i].name; 5465 offsets[j] = cong_cnts[i].offset; 5466 } 5467 } 5468 5469 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5470 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) { 5471 names[j] = ext_ppcnt_cnts[i].name; 5472 offsets[j] = ext_ppcnt_cnts[i].offset; 5473 } 5474 } 5475 } 5476 5477 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) 5478 { 5479 int num_cnt_ports; 5480 int err = 0; 5481 int i; 5482 bool is_shared; 5483 5484 is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; 5485 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 
1 : dev->num_ports; 5486 5487 for (i = 0; i < num_cnt_ports; i++) { 5488 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); 5489 if (err) 5490 goto err_alloc; 5491 5492 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, 5493 dev->port[i].cnts.offsets); 5494 5495 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5496 &dev->port[i].cnts.set_id, 5497 is_shared ? 5498 MLX5_SHARED_RESOURCE_UID : 0); 5499 if (err) { 5500 mlx5_ib_warn(dev, 5501 "couldn't allocate queue counter for port %d, err %d\n", 5502 i + 1, err); 5503 goto err_alloc; 5504 } 5505 dev->port[i].cnts.set_id_valid = true; 5506 } 5507 return 0; 5508 5509 err_alloc: 5510 mlx5_ib_dealloc_counters(dev); 5511 return err; 5512 } 5513 5514 static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, 5515 u8 port_num) 5516 { 5517 return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : 5518 &dev->port[port_num].cnts; 5519 } 5520 5521 /** 5522 * mlx5_ib_get_counters_id - Returns counters id to use for device+port 5523 * @dev: Pointer to mlx5 IB device 5524 * @port_num: Zero based port number 5525 * 5526 * mlx5_ib_get_counters_id() Returns counters set id to use for given 5527 * device port combination in switchdev and non switchdev mode of the 5528 * parent device. 5529 */ 5530 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num) 5531 { 5532 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); 5533 5534 return cnts->set_id; 5535 } 5536 5537 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 5538 u8 port_num) 5539 { 5540 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5541 const struct mlx5_ib_counters *cnts; 5542 bool is_switchdev = is_mdev_switchdev_mode(dev->mdev); 5543 5544 if ((is_switchdev && port_num) || (!is_switchdev && !port_num)) 5545 return NULL; 5546 5547 cnts = get_counters(dev, port_num - 1); 5548 5549 return rdma_alloc_hw_stats_struct(cnts->names, 5550 cnts->num_q_counters + 5551 cnts->num_cong_counters + 5552 cnts->num_ext_ppcnt_counters, 5553 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5554 } 5555 5556 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, 5557 const struct mlx5_ib_counters *cnts, 5558 struct rdma_hw_stats *stats, 5559 u16 set_id) 5560 { 5561 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 5562 void *out; 5563 __be32 val; 5564 int ret, i; 5565 5566 out = kvzalloc(outlen, GFP_KERNEL); 5567 if (!out) 5568 return -ENOMEM; 5569 5570 ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen); 5571 if (ret) 5572 goto free; 5573 5574 for (i = 0; i < cnts->num_q_counters; i++) { 5575 val = *(__be32 *)(out + cnts->offsets[i]); 5576 stats->value[i] = (u64)be32_to_cpu(val); 5577 } 5578 5579 free: 5580 kvfree(out); 5581 return ret; 5582 } 5583 5584 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, 5585 const struct mlx5_ib_counters *cnts, 5586 struct rdma_hw_stats *stats) 5587 { 5588 int offset = cnts->num_q_counters + cnts->num_cong_counters; 5589 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 5590 int ret, i; 5591 void *out; 5592 5593 out = kvzalloc(sz, GFP_KERNEL); 5594 if (!out) 5595 return -ENOMEM; 5596 5597 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out); 5598 if (ret) 5599 goto free; 5600 5601 for (i = 0; i < cnts->num_ext_ppcnt_counters; i++) 5602 stats->value[i + offset] = 5603 be64_to_cpup((__be64 *)(out + 5604 cnts->offsets[i + offset])); 5605 free: 5606 kvfree(out); 5607 return ret; 5608 } 5609 5610 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 5611 struct rdma_hw_stats *stats, 5612 u8 port_num, int 
index) 5613 { 5614 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5615 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); 5616 struct mlx5_core_dev *mdev; 5617 int ret, num_counters; 5618 u8 mdev_port_num; 5619 5620 if (!stats) 5621 return -EINVAL; 5622 5623 num_counters = cnts->num_q_counters + 5624 cnts->num_cong_counters + 5625 cnts->num_ext_ppcnt_counters; 5626 5627 /* q_counters are per IB device, query the master mdev */ 5628 ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id); 5629 if (ret) 5630 return ret; 5631 5632 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5633 ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats); 5634 if (ret) 5635 return ret; 5636 } 5637 5638 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5639 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, 5640 &mdev_port_num); 5641 if (!mdev) { 5642 /* If port is not affiliated yet, its in down state 5643 * which doesn't have any counters yet, so it would be 5644 * zero. So no need to read from the HCA. 5645 */ 5646 goto done; 5647 } 5648 ret = mlx5_lag_query_cong_counters(dev->mdev, 5649 stats->value + 5650 cnts->num_q_counters, 5651 cnts->num_cong_counters, 5652 cnts->offsets + 5653 cnts->num_q_counters); 5654 5655 mlx5_ib_put_native_port_mdev(dev, port_num); 5656 if (ret) 5657 return ret; 5658 } 5659 5660 done: 5661 return num_counters; 5662 } 5663 5664 static struct rdma_hw_stats * 5665 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) 5666 { 5667 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5668 const struct mlx5_ib_counters *cnts = 5669 get_counters(dev, counter->port - 1); 5670 5671 /* Q counters are in the beginning of all counters */ 5672 return rdma_alloc_hw_stats_struct(cnts->names, 5673 cnts->num_q_counters, 5674 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5675 } 5676 5677 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) 5678 { 5679 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5680 const struct mlx5_ib_counters *cnts = 5681 get_counters(dev, counter->port - 1); 5682 5683 return mlx5_ib_query_q_counters(dev->mdev, cnts, 5684 counter->stats, counter->id); 5685 } 5686 5687 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, 5688 struct ib_qp *qp) 5689 { 5690 struct mlx5_ib_dev *dev = to_mdev(qp->device); 5691 u16 cnt_set_id = 0; 5692 int err; 5693 5694 if (!counter->id) { 5695 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5696 &cnt_set_id, 5697 MLX5_SHARED_RESOURCE_UID); 5698 if (err) 5699 return err; 5700 counter->id = cnt_set_id; 5701 } 5702 5703 err = mlx5_ib_qp_set_counter(qp, counter); 5704 if (err) 5705 goto fail_set_counter; 5706 5707 return 0; 5708 5709 fail_set_counter: 5710 mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id); 5711 counter->id = 0; 5712 5713 return err; 5714 } 5715 5716 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) 5717 { 5718 return mlx5_ib_qp_set_counter(qp, NULL); 5719 } 5720 5721 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) 5722 { 5723 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5724 5725 return mlx5_core_dealloc_q_counter(dev->mdev, counter->id); 5726 } 5727 5728 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, 5729 enum rdma_netdev_t type, 5730 struct rdma_netdev_alloc_params *params) 5731 { 5732 if (type != RDMA_NETDEV_IPOIB) 5733 return -EOPNOTSUPP; 5734 5735 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); 5736 } 5737 5738 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev) 5739 { 
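/*
 * No-op when the delay_drop debugfs directory was never created, e.g.
 * because mlx5_debugfs_root is unavailable (see delay_drop_debugfs_init()
 * below), so it is safe to call this unconditionally from
 * cancel_delay_drop().
 */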
5740 if (!dev->delay_drop.dir_debugfs) 5741 return; 5742 debugfs_remove_recursive(dev->delay_drop.dir_debugfs); 5743 dev->delay_drop.dir_debugfs = NULL; 5744 } 5745 5746 static void cancel_delay_drop(struct mlx5_ib_dev *dev) 5747 { 5748 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5749 return; 5750 5751 cancel_work_sync(&dev->delay_drop.delay_drop_work); 5752 delay_drop_debugfs_cleanup(dev); 5753 } 5754 5755 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf, 5756 size_t count, loff_t *pos) 5757 { 5758 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5759 char lbuf[20]; 5760 int len; 5761 5762 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout); 5763 return simple_read_from_buffer(buf, count, pos, lbuf, len); 5764 } 5765 5766 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf, 5767 size_t count, loff_t *pos) 5768 { 5769 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5770 u32 timeout; 5771 u32 var; 5772 5773 if (kstrtouint_from_user(buf, count, 0, &var)) 5774 return -EFAULT; 5775 5776 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 5777 1000); 5778 if (timeout != var) 5779 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n", 5780 timeout); 5781 5782 delay_drop->timeout = timeout; 5783 5784 return count; 5785 } 5786 5787 static const struct file_operations fops_delay_drop_timeout = { 5788 .owner = THIS_MODULE, 5789 .open = simple_open, 5790 .write = delay_drop_timeout_write, 5791 .read = delay_drop_timeout_read, 5792 }; 5793 5794 static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev) 5795 { 5796 struct dentry *root; 5797 5798 if (!mlx5_debugfs_root) 5799 return; 5800 5801 root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); 5802 dev->delay_drop.dir_debugfs = root; 5803 5804 debugfs_create_atomic_t("num_timeout_events", 0400, root, 5805 &dev->delay_drop.events_cnt); 5806 debugfs_create_atomic_t("num_rqs", 0400, root, 5807 &dev->delay_drop.rqs_cnt); 5808 debugfs_create_file("timeout", 0600, root, &dev->delay_drop, 5809 &fops_delay_drop_timeout); 5810 } 5811 5812 static void init_delay_drop(struct mlx5_ib_dev *dev) 5813 { 5814 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5815 return; 5816 5817 mutex_init(&dev->delay_drop.lock); 5818 dev->delay_drop.dev = dev; 5819 dev->delay_drop.activate = false; 5820 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; 5821 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5822 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5823 atomic_set(&dev->delay_drop.events_cnt, 0); 5824 5825 delay_drop_debugfs_init(dev); 5826 } 5827 5828 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5829 struct mlx5_ib_multiport_info *mpi) 5830 { 5831 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5832 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5833 int comps; 5834 int err; 5835 int i; 5836 5837 lockdep_assert_held(&mlx5_ib_multiport_mutex); 5838 5839 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5840 5841 spin_lock(&port->mp.mpi_lock); 5842 if (!mpi->ibdev) { 5843 spin_unlock(&port->mp.mpi_lock); 5844 return; 5845 } 5846 5847 mpi->ibdev = NULL; 5848 5849 spin_unlock(&port->mp.mpi_lock); 5850 if (mpi->mdev_events.notifier_call) 5851 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); 5852 mpi->mdev_events.notifier_call = NULL; 5853 mlx5_remove_netdev_notifier(ibdev, port_num); 5854 spin_lock(&port->mp.mpi_lock); 5855 
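/*
 * Drain outstanding references to the slave mdev before detaching:
 * snapshot mdev_refcnt under mpi_lock, mark the port as unaffiliating and
 * wait for one unref_comp completion per reference; the put path
 * (e.g. mlx5_ib_put_native_port_mdev()) is expected to signal unref_comp
 * while 'unaffiliate' is set.
 */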
	comps = mpi->mdev_refcnt;
	if (comps) {
		mpi->unaffiliate = true;
		init_completion(&mpi->unref_comp);
		spin_unlock(&port->mp.mpi_lock);

		for (i = 0; i < comps; i++)
			wait_for_completion(&mpi->unref_comp);

		spin_lock(&port->mp.mpi_lock);
		mpi->unaffiliate = false;
	}

	port->mp.mpi = NULL;

	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
	/* Only log the failure; the pointers were already cleaned up and
	 * the mpi was already put back on the unaffiliated list above.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}

static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	mpi->mdev_events.notifier_call = NULL;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;

	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
	if (err)
		goto unbind;

	err = mlx5_add_netdev_notifier(ibdev, port_num);
	if (err) {
		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
			    port_num + 1);
		goto unbind;
	}

	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);

	mlx5_ib_init_cong_debugfs(ibdev, port_num);

	return true;

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}

static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		return err;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		bool bound = false;

		/* build a stub multiport info struct for the native port.
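		 * The master port is never placed on the unaffiliated list;
		 * the stub only keeps port[i].mp.mpi valid for it.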
		 */
		if (i == port_num) {
			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
			if (!mpi) {
				mutex_unlock(&mlx5_ib_multiport_mutex);
				mlx5_nic_vport_disable_roce(dev->mdev);
				return -ENOMEM;
			}

			mpi->is_master = true;
			mpi->mdev = dev->mdev;
			mpi->sys_image_guid = dev->sys_image_guid;
			dev->port[i].mp.mpi = mpi;
			mpi->ibdev = dev;
			mpi = NULL;
			continue;
		}

		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
				    list) {
			if (dev->sys_image_guid == mpi->sys_image_guid &&
			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
				bound = mlx5_ib_bind_slave_port(dev, mpi);
			}

			if (bound) {
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
			}
		}
		if (!bound) {
			get_port_caps(dev, i + 1);
			mlx5_ib_dbg(dev, "no free port found for port %d\n",
				    i + 1);
		}
	}

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

	mlx5_nic_vport_disable_roce(dev->mdev);
}

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_dm,
	UVERBS_OBJECT_DM,
	UVERBS_METHOD_DM_ALLOC,
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
			     enum mlx5_ib_uapi_dm_type,
			     UA_OPTIONAL));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_flow_action,
	UVERBS_OBJECT_FLOW_ACTION,
	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
			     enum mlx5_ib_uapi_flow_action_flags));

static const struct uapi_definition mlx5_ib_defs[] = {
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
#endif

	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_action),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
	{}
};

static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	struct mlx5_read_counters_attr mread_attr = {};
	struct mlx5_ib_flow_counters_desc *desc;
	int ret, i;

	mutex_lock(&mcounters->mcntrs_mutex);
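	/* The bound flow counter descriptions must not index past the
	 * buffer supplied by the caller.
	 */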
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		goto err_bound;
	}

	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		goto err_bound;
	}

	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	if (ret)
		goto err_read;

	/* do the pass over the counters data array to assign according to the
	 * descriptions and indexing pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];

err_read:
	kfree(mread_attr.out);
err_bound:
	mutex_unlock(&mcounters->mcntrs_mutex);
	return ret;
}

static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);

	kfree(mcounters);

	return 0;
}

static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
						   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters;

	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
	if (!mcounters)
		return ERR_PTR(-ENOMEM);

	mutex_init(&mcounters->mcntrs_mutex);

	return &mcounters->ibcntrs;
}

static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_multiport_master(dev);
	WARN_ON(!xa_empty(&dev->odp_mkeys));
	cleanup_srcu_struct(&dev->odp_srcu);

	WARN_ON(!xa_empty(&dev->sig_mrs));
	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}

static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		spin_lock_init(&dev->port[i].mp.mpi_lock);
		rwlock_init(&dev->port[i].roce.netdev_lock);
		dev->port[i].roce.dev = dev;
		dev->port[i].roce.native_port_num = i + 1;
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

	mlx5_ib_internal_fill_odp_caps(dev);

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;

	err = set_has_smi_cap(dev);
	if (err)
		return err;

	if (!mlx5_core_mp_enabled(mdev)) {
		for (i = 1; i <= dev->num_ports; i++) {
			err = get_port_caps(dev, i);
			if (err)
				break;
		}
	} else {
		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
	}
	if (err)
		goto err_mp;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors	= mlx5_comp_vectors_count(mdev);
	dev->ib_dev.dev.parent		= mdev->device;

	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);
	xa_init(&dev->odp_mkeys);
	xa_init(&dev->sig_mrs);

	spin_lock_init(&dev->dm.lock);
	dev->dm.dev = mdev;

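	/* The SRCU is used by the ODP code to synchronize mkey teardown
	 * with concurrent page-fault handling.
	 */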
	err = init_srcu_struct(&dev->odp_srcu);
	if (err)
		goto err_mp;

	return 0;

err_mp:
	mlx5_ib_cleanup_multiport_master(dev);

	return err;
}

static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
{
	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);

	if (!dev->flow_db)
		return -ENOMEM;

	mutex_init(&dev->flow_db->lock);

	return 0;
}

static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
{
	kfree(dev->flow_db);
}

static const struct ib_device_ops mlx5_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MLX5,
	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,

	.add_gid = mlx5_ib_add_gid,
	.alloc_mr = mlx5_ib_alloc_mr,
	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
	.alloc_pd = mlx5_ib_alloc_pd,
	.alloc_ucontext = mlx5_ib_alloc_ucontext,
	.attach_mcast = mlx5_ib_mcg_attach,
	.check_mr_status = mlx5_ib_check_mr_status,
	.create_ah = mlx5_ib_create_ah,
	.create_counters = mlx5_ib_create_counters,
	.create_cq = mlx5_ib_create_cq,
	.create_flow = mlx5_ib_create_flow,
	.create_qp = mlx5_ib_create_qp,
	.create_srq = mlx5_ib_create_srq,
	.dealloc_pd = mlx5_ib_dealloc_pd,
	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
	.del_gid = mlx5_ib_del_gid,
	.dereg_mr = mlx5_ib_dereg_mr,
	.destroy_ah = mlx5_ib_destroy_ah,
	.destroy_counters = mlx5_ib_destroy_counters,
	.destroy_cq = mlx5_ib_destroy_cq,
	.destroy_flow = mlx5_ib_destroy_flow,
	.destroy_flow_action = mlx5_ib_destroy_flow_action,
	.destroy_qp = mlx5_ib_destroy_qp,
	.destroy_srq = mlx5_ib_destroy_srq,
	.detach_mcast = mlx5_ib_mcg_detach,
	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
	.drain_rq = mlx5_ib_drain_rq,
	.drain_sq = mlx5_ib_drain_sq,
	.enable_driver = mlx5_ib_enable_driver,
	.fill_res_entry = mlx5_ib_fill_res_entry,
	.fill_stat_entry = mlx5_ib_fill_stat_entry,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mlx5_ib_get_dma_mr,
	.get_link_layer = mlx5_ib_port_link_layer,
	.map_mr_sg = mlx5_ib_map_mr_sg,
	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
	.mmap = mlx5_ib_mmap,
	.mmap_free = mlx5_ib_mmap_free,
	.modify_cq = mlx5_ib_modify_cq,
	.modify_device = mlx5_ib_modify_device,
	.modify_port = mlx5_ib_modify_port,
	.modify_qp = mlx5_ib_modify_qp,
	.modify_srq = mlx5_ib_modify_srq,
	.poll_cq = mlx5_ib_poll_cq,
	.post_recv = mlx5_ib_post_recv,
	.post_send = mlx5_ib_post_send,
	.post_srq_recv = mlx5_ib_post_srq_recv,
	.process_mad = mlx5_ib_process_mad,
	.query_ah = mlx5_ib_query_ah,
	.query_device = mlx5_ib_query_device,
	.query_gid = mlx5_ib_query_gid,
	.query_pkey = mlx5_ib_query_pkey,
	.query_qp = mlx5_ib_query_qp,
	.query_srq = mlx5_ib_query_srq,
	.read_counters = mlx5_ib_read_counters,
	.reg_user_mr = mlx5_ib_reg_user_mr,
	.req_notify_cq = mlx5_ib_arm_cq,
	.rereg_user_mr = mlx5_ib_rereg_user_mr,
	.resize_cq = mlx5_ib_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};

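/* Optional op tables: mlx5_ib_stage_caps_init() installs each of the
 * following only when the matching device capability is reported.
 */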
static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};

static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
};

static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
	.get_vf_config = mlx5_ib_get_vf_config,
	.get_vf_guid = mlx5_ib_get_vf_guid,
	.get_vf_stats = mlx5_ib_get_vf_stats,
	.set_vf_guid = mlx5_ib_set_vf_guid,
	.set_vf_link_state = mlx5_ib_set_vf_link_state,
};

static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
	.alloc_mw = mlx5_ib_alloc_mw,
	.dealloc_mw = mlx5_ib_dealloc_mw,
};

static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx5_ib_alloc_xrcd,
	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
};

static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
	.alloc_dm = mlx5_ib_alloc_dm,
	.dealloc_dm = mlx5_ib_dealloc_dm,
	.reg_dm_mr = mlx5_ib_reg_dm_mr,
};

static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
		ib_set_device_ops(&dev->ib_dev,
				  &mlx5_ib_dev_ipoib_enhanced_ops);

	if (mlx5_core_is_pf(mdev))
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);

	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);

	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
		dev->ib_dev.driver_def = mlx5_ib_defs;

	err = init_node_data(dev);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		mutex_init(&dev->lb.mutex);

	dev->ib_dev.use_cq_dim = true;

	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_ops = {
	.get_port_immutable = mlx5_port_immutable,
	.query_port = mlx5_ib_query_port,
};

static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
	.get_port_immutable = mlx5_port_rep_immutable,
	.query_port = mlx5_ib_rep_query_port,
};

static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
	.create_wq = mlx5_ib_create_wq,
	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
	.destroy_wq = mlx5_ib_destroy_wq,
	.get_netdev = mlx5_ib_get_netdev,
	.modify_wq = mlx5_ib_modify_wq,
};

static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
	u8 port_num;

	dev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);

	port_num = mlx5_core_native_port_num(dev->mdev) - 1;

	/* Register only for native ports */
	return mlx5_add_netdev_notifier(dev, port_num);
}

static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
{
	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;

	mlx5_remove_netdev_notifier(dev, port_num);
}

static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err = 0;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET)
		err = mlx5_ib_stage_common_roce_init(dev);

	return err;
}

static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_stage_common_roce_cleanup(dev);
}

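/* Full RoCE init used by the PF profile: common RoCE state plus the
 * Ethernet representation set up by mlx5_enable_eth().
 */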
static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_ib_stage_common_roce_init(dev);
		if (err)
			return err;

		err = mlx5_enable_eth(dev);
		if (err)
			goto cleanup;
	}

	return 0;
cleanup:
	mlx5_ib_stage_common_roce_cleanup(dev);

	return err;
}

static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_ib_stage_common_roce_cleanup(dev);
	}
}

static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
	return create_dev_resources(&dev->devr);
}

static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_dev_resources(&dev->devr);
}

static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	return mlx5_ib_odp_init_one(dev);
}

static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_odp_cleanup_one(dev);
}

static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
	.get_hw_stats = mlx5_ib_get_hw_stats,
	.counter_bind_qp = mlx5_ib_counter_bind_qp,
	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
	.counter_dealloc = mlx5_ib_counter_dealloc,
	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
	.counter_update_stats = mlx5_ib_counter_update_stats,
};

static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);

		return mlx5_ib_alloc_counters(dev);
	}

	return 0;
}

static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);
}

static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_init_cong_debugfs(dev,
				  mlx5_core_native_port_num(dev->mdev) - 1);
	return 0;
}

static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_cong_debugfs(dev,
				     mlx5_core_native_port_num(dev->mdev) - 1);
}

static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
}

static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}

static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		/* On failure release the regular bfreg allocated above. */
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}

static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}

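/* Register the IB device. The device name depends on whether RoCE LAG
 * is active: "mlx5_bond_%d" when it is, "mlx5_%d" otherwise.
 */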
static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
	const char *name;

	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
	if (!mlx5_lag_is_roce(dev->mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";
	return ib_register_device(&dev->ib_dev, name);
}

static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_umrc_res(dev);
}

static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}

static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
	return create_umr_res(dev);
}

static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
{
	init_delay_drop(dev);

	return 0;
}

static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
{
	cancel_delay_drop(dev);
}

static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
	dev->mdev_events.notifier_call = mlx5_ib_event;
	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
	return 0;
}

static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}

static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
{
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		mlx5_ib_devx_init_event_table(dev);
	}

	return 0;
}

static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
{
	if (dev->devx_whitelist_uid) {
		mlx5_ib_devx_cleanup_event_table(dev);
		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}

/* Called by the RDMA core through the ->enable_driver op; probes
 * whether write-combining works for this device.
 */
int mlx5_ib_enable_driver(struct ib_device *dev)
{
	struct mlx5_ib_dev *mdev = to_mdev(dev);
	int ret;

	ret = mlx5_ib_test_wc(mdev);
	mlx5_ib_dbg(mdev, "Write-Combining %s",
		    mdev->wc_support ? "supported" : "not supported");

	return ret;
}

void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage)
{
	/* Number of stages to cleanup */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->profile = profile;
	dev->ib_active = true;

	return dev;

err_out:
	__mlx5_ib_remove(dev, profile, i);

	return NULL;
}

static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_roce_init,
		     mlx5_ib_stage_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_stage_odp_init,
		     mlx5_ib_stage_odp_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_stage_devx_init,
		     mlx5_ib_stage_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
};

/* Profile used for Ethernet ports when RoCE is disabled: compared with
 * pf_profile it skips the ODP, congestion debugfs and delay-drop stages
 * and uses the raw-eth port/RoCE callbacks.
 */
const struct mlx5_ib_profile raw_eth_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_raw_eth_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_raw_eth_roce_init,
		     mlx5_ib_stage_raw_eth_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_stage_devx_init,
		     mlx5_ib_stage_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;
	bool bound = false;
	int err;

	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
	if (!mpi)
		return NULL;

	mpi->mdev = mdev;

	err = mlx5_query_nic_vport_system_image_guid(mdev,
						     &mpi->sys_image_guid);
	if (err) {
		kfree(mpi);
		return NULL;
	}

	mutex_lock(&mlx5_ib_multiport_mutex);
	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
		if (dev->sys_image_guid == mpi->sys_image_guid)
			bound = mlx5_ib_bind_slave_port(dev, mpi);

		if (bound) {
			rdma_roce_rescan_device(&dev->ib_dev);
			break;
		}
	}

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(mdev->device,
			"no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

	return mpi;
}

static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	const struct mlx5_ib_profile *profile;
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;
	int port_type_cap;
	int num_ports;

	printk_once(KERN_INFO "%s", mlx5_version);

	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
		if (!mlx5_core_mp_enabled(mdev))
			mlx5_ib_register_vport_reps(mdev);
		return mdev;
	}

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
		return mlx5_ib_add_slave_port(mdev);

	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			MLX5_CAP_GEN(mdev, num_vhca_ports));
	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!dev)
		return NULL;
	dev->port = kcalloc(num_ports, sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port) {
		ib_dealloc_device(&dev->ib_dev);
		return NULL;
	}

	dev->mdev = mdev;
	dev->num_ports = num_ports;

	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
		profile = &raw_eth_profile;
	else
		profile = &pf_profile;

	return __mlx5_ib_add(dev, profile);
}

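/* 'context' is whatever mlx5_ib_add() returned: the mdev itself in the
 * switchdev-offloads case, an mlx5_ib_multiport_info for a bound slave
 * port, or the mlx5_ib_dev otherwise.
 */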
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;

	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
		mlx5_ib_unregister_vport_reps(mdev);
		return;
	}

	if (mlx5_core_is_mp_slave(mdev)) {
		mpi = context;
		mutex_lock(&mlx5_ib_multiport_mutex);
		if (mpi->ibdev)
			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
		list_del(&mpi->list);
		mutex_unlock(&mlx5_ib_multiport_mutex);
		kfree(mpi);
		return;
	}

	dev = context;
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};

unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}

static int __init mlx5_ib_init(void)
{
	int err;

	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mutex_init(&xlt_emergency_page_mutex);

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		free_page(xlt_emergency_page);
		return -ENOMEM;
	}

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);