1 /* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/module.h> 35 #include <linux/init.h> 36 #include <linux/slab.h> 37 #include <linux/errno.h> 38 #include <linux/netdevice.h> 39 #include <linux/inetdevice.h> 40 #include <linux/rtnetlink.h> 41 #include <linux/if_vlan.h> 42 #include <linux/sched/mm.h> 43 #include <linux/sched/task.h> 44 45 #include <net/ipv6.h> 46 #include <net/addrconf.h> 47 #include <net/devlink.h> 48 49 #include <rdma/ib_smi.h> 50 #include <rdma/ib_user_verbs.h> 51 #include <rdma/ib_addr.h> 52 #include <rdma/ib_cache.h> 53 54 #include <net/bonding.h> 55 56 #include <linux/mlx4/driver.h> 57 #include <linux/mlx4/cmd.h> 58 #include <linux/mlx4/qp.h> 59 60 #include "mlx4_ib.h" 61 #include <rdma/mlx4-abi.h> 62 63 #define DRV_NAME MLX4_IB_DRV_NAME 64 #define DRV_VERSION "4.0-0" 65 66 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF 67 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF 68 #define MLX4_IB_CARD_REV_A0 0xA0 69 70 MODULE_AUTHOR("Roland Dreier"); 71 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); 72 MODULE_LICENSE("Dual BSD/GPL"); 73 74 int mlx4_ib_sm_guid_assign = 0; 75 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444); 76 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)"); 77 78 static const char mlx4_ib_version[] = 79 DRV_NAME ": Mellanox ConnectX InfiniBand driver v" 80 DRV_VERSION "\n"; 81 82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); 83 static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, 84 u8 port_num); 85 86 static struct workqueue_struct *wq; 87 88 static void init_query_mad(struct ib_smp *mad) 89 { 90 mad->base_version = 1; 91 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 92 mad->class_version = 1; 93 mad->method = IB_MGMT_METHOD_GET; 94 } 95 96 static int check_flow_steering_support(struct mlx4_dev *dev) 97 { 98 int eth_num_ports = 0; 99 int ib_num_ports = 0; 100 101 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED; 102 103 if (dmfs) { 104 int i; 105 
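/* Count the Ethernet and InfiniBand ports: device-managed flow steering stays enabled only if the firmware advertises steering support for every port type that is actually present. */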
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 106 eth_num_ports++; 107 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 108 ib_num_ports++; 109 dmfs &= (!ib_num_ports || 110 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) && 111 (!eth_num_ports || 112 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)); 113 if (ib_num_ports && mlx4_is_mfunc(dev)) { 114 pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n"); 115 dmfs = 0; 116 } 117 } 118 return dmfs; 119 } 120 121 static int num_ib_ports(struct mlx4_dev *dev) 122 { 123 int ib_ports = 0; 124 int i; 125 126 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 127 ib_ports++; 128 129 return ib_ports; 130 } 131 132 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num) 133 { 134 struct mlx4_ib_dev *ibdev = to_mdev(device); 135 struct net_device *dev; 136 137 rcu_read_lock(); 138 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); 139 140 if (dev) { 141 if (mlx4_is_bonded(ibdev->dev)) { 142 struct net_device *upper = NULL; 143 144 upper = netdev_master_upper_dev_get_rcu(dev); 145 if (upper) { 146 struct net_device *active; 147 148 active = bond_option_active_slave_get_rcu(netdev_priv(upper)); 149 if (active) 150 dev = active; 151 } 152 } 153 } 154 if (dev) 155 dev_hold(dev); 156 157 rcu_read_unlock(); 158 return dev; 159 } 160 161 static int mlx4_ib_update_gids_v1(struct gid_entry *gids, 162 struct mlx4_ib_dev *ibdev, 163 u8 port_num) 164 { 165 struct mlx4_cmd_mailbox *mailbox; 166 int err; 167 struct mlx4_dev *dev = ibdev->dev; 168 int i; 169 union ib_gid *gid_tbl; 170 171 mailbox = mlx4_alloc_cmd_mailbox(dev); 172 if (IS_ERR(mailbox)) 173 return -ENOMEM; 174 175 gid_tbl = mailbox->buf; 176 177 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) 178 memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid)); 179 180 err = mlx4_cmd(dev, mailbox->dma, 181 MLX4_SET_PORT_GID_TABLE << 8 | port_num, 182 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 183 MLX4_CMD_WRAPPED); 184 if (mlx4_is_bonded(dev)) 185 err += mlx4_cmd(dev, mailbox->dma, 186 MLX4_SET_PORT_GID_TABLE << 8 | 2, 187 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 188 MLX4_CMD_WRAPPED); 189 190 mlx4_free_cmd_mailbox(dev, mailbox); 191 return err; 192 } 193 194 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, 195 struct mlx4_ib_dev *ibdev, 196 u8 port_num) 197 { 198 struct mlx4_cmd_mailbox *mailbox; 199 int err; 200 struct mlx4_dev *dev = ibdev->dev; 201 int i; 202 struct { 203 union ib_gid gid; 204 __be32 rsrvd1[2]; 205 __be16 rsrvd2; 206 u8 type; 207 u8 version; 208 __be32 rsrvd3; 209 } *gid_tbl; 210 211 mailbox = mlx4_alloc_cmd_mailbox(dev); 212 if (IS_ERR(mailbox)) 213 return -ENOMEM; 214 215 gid_tbl = mailbox->buf; 216 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { 217 memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid)); 218 if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 219 gid_tbl[i].version = 2; 220 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) 221 gid_tbl[i].type = 1; 222 else 223 memset(&gid_tbl[i].gid, 0, 12); 224 } 225 } 226 227 err = mlx4_cmd(dev, mailbox->dma, 228 MLX4_SET_PORT_ROCE_ADDR << 8 | port_num, 229 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 230 MLX4_CMD_WRAPPED); 231 if (mlx4_is_bonded(dev)) 232 err += mlx4_cmd(dev, mailbox->dma, 233 MLX4_SET_PORT_ROCE_ADDR << 8 | 2, 234 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 235 MLX4_CMD_WRAPPED); 236 237 mlx4_free_cmd_mailbox(dev, mailbox); 238 return err; 239 } 240 241 static int mlx4_ib_update_gids(struct gid_entry *gids, 242 struct 
mlx4_ib_dev *ibdev, 243 u8 port_num) 244 { 245 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) 246 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num); 247 248 return mlx4_ib_update_gids_v1(gids, ibdev, port_num); 249 } 250 251 static int mlx4_ib_add_gid(struct ib_device *device, 252 u8 port_num, 253 unsigned int index, 254 const union ib_gid *gid, 255 const struct ib_gid_attr *attr, 256 void **context) 257 { 258 struct mlx4_ib_dev *ibdev = to_mdev(device); 259 struct mlx4_ib_iboe *iboe = &ibdev->iboe; 260 struct mlx4_port_gid_table *port_gid_table; 261 int free = -1, found = -1; 262 int ret = 0; 263 int hw_update = 0; 264 int i; 265 struct gid_entry *gids = NULL; 266 267 if (!rdma_cap_roce_gid_table(device, port_num)) 268 return -EINVAL; 269 270 if (port_num > MLX4_MAX_PORTS) 271 return -EINVAL; 272 273 if (!context) 274 return -EINVAL; 275 276 port_gid_table = &iboe->gids[port_num - 1]; 277 spin_lock_bh(&iboe->lock); 278 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { 279 if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) && 280 (port_gid_table->gids[i].gid_type == attr->gid_type)) { 281 found = i; 282 break; 283 } 284 if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid))) 285 free = i; /* HW has space */ 286 } 287 288 if (found < 0) { 289 if (free < 0) { 290 ret = -ENOSPC; 291 } else { 292 port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC); 293 if (!port_gid_table->gids[free].ctx) { 294 ret = -ENOMEM; 295 } else { 296 *context = port_gid_table->gids[free].ctx; 297 memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid)); 298 port_gid_table->gids[free].gid_type = attr->gid_type; 299 port_gid_table->gids[free].ctx->real_index = free; 300 port_gid_table->gids[free].ctx->refcount = 1; 301 hw_update = 1; 302 } 303 } 304 } else { 305 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx; 306 *context = ctx; 307 ctx->refcount++; 308 } 309 if (!ret && hw_update) { 310 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC); 311 if (!gids) { 312 ret = -ENOMEM; 313 } else { 314 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { 315 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); 316 gids[i].gid_type = port_gid_table->gids[i].gid_type; 317 } 318 } 319 } 320 spin_unlock_bh(&iboe->lock); 321 322 if (!ret && hw_update) { 323 ret = mlx4_ib_update_gids(gids, ibdev, port_num); 324 kfree(gids); 325 } 326 327 return ret; 328 } 329 330 static int mlx4_ib_del_gid(struct ib_device *device, 331 u8 port_num, 332 unsigned int index, 333 void **context) 334 { 335 struct gid_cache_context *ctx = *context; 336 struct mlx4_ib_dev *ibdev = to_mdev(device); 337 struct mlx4_ib_iboe *iboe = &ibdev->iboe; 338 struct mlx4_port_gid_table *port_gid_table; 339 int ret = 0; 340 int hw_update = 0; 341 struct gid_entry *gids = NULL; 342 343 if (!rdma_cap_roce_gid_table(device, port_num)) 344 return -EINVAL; 345 346 if (port_num > MLX4_MAX_PORTS) 347 return -EINVAL; 348 349 port_gid_table = &iboe->gids[port_num - 1]; 350 spin_lock_bh(&iboe->lock); 351 if (ctx) { 352 ctx->refcount--; 353 if (!ctx->refcount) { 354 unsigned int real_index = ctx->real_index; 355 356 memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid)); 357 kfree(port_gid_table->gids[real_index].ctx); 358 port_gid_table->gids[real_index].ctx = NULL; 359 hw_update = 1; 360 } 361 } 362 if (!ret && hw_update) { 363 int i; 364 365 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC); 366 if (!gids) { 367 ret = -ENOMEM; 368 } 
else { 369 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) 370 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); 371 } 372 } 373 spin_unlock_bh(&iboe->lock); 374 375 if (!ret && hw_update) { 376 ret = mlx4_ib_update_gids(gids, ibdev, port_num); 377 kfree(gids); 378 } 379 return ret; 380 } 381 382 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, 383 u8 port_num, int index) 384 { 385 struct mlx4_ib_iboe *iboe = &ibdev->iboe; 386 struct gid_cache_context *ctx = NULL; 387 union ib_gid gid; 388 struct mlx4_port_gid_table *port_gid_table; 389 int real_index = -EINVAL; 390 int i; 391 int ret; 392 unsigned long flags; 393 struct ib_gid_attr attr; 394 395 if (port_num > MLX4_MAX_PORTS) 396 return -EINVAL; 397 398 if (mlx4_is_bonded(ibdev->dev)) 399 port_num = 1; 400 401 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) 402 return index; 403 404 ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr); 405 if (ret) 406 return ret; 407 408 if (attr.ndev) 409 dev_put(attr.ndev); 410 411 if (!memcmp(&gid, &zgid, sizeof(gid))) 412 return -EINVAL; 413 414 spin_lock_irqsave(&iboe->lock, flags); 415 port_gid_table = &iboe->gids[port_num - 1]; 416 417 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) 418 if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) && 419 attr.gid_type == port_gid_table->gids[i].gid_type) { 420 ctx = port_gid_table->gids[i].ctx; 421 break; 422 } 423 if (ctx) 424 real_index = ctx->real_index; 425 spin_unlock_irqrestore(&iboe->lock, flags); 426 return real_index; 427 } 428 429 static int mlx4_ib_query_device(struct ib_device *ibdev, 430 struct ib_device_attr *props, 431 struct ib_udata *uhw) 432 { 433 struct mlx4_ib_dev *dev = to_mdev(ibdev); 434 struct ib_smp *in_mad = NULL; 435 struct ib_smp *out_mad = NULL; 436 int err; 437 int have_ib_ports; 438 struct mlx4_uverbs_ex_query_device cmd; 439 struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0}; 440 struct mlx4_clock_params clock_params; 441 442 if (uhw->inlen) { 443 if (uhw->inlen < sizeof(cmd)) 444 return -EINVAL; 445 446 err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd)); 447 if (err) 448 return err; 449 450 if (cmd.comp_mask) 451 return -EINVAL; 452 453 if (cmd.reserved) 454 return -EINVAL; 455 } 456 457 resp.response_length = offsetof(typeof(resp), response_length) + 458 sizeof(resp.response_length); 459 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 460 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 461 err = -ENOMEM; 462 if (!in_mad || !out_mad) 463 goto out; 464 465 init_query_mad(in_mad); 466 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 467 468 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, 469 1, NULL, NULL, in_mad, out_mad); 470 if (err) 471 goto out; 472 473 memset(props, 0, sizeof *props); 474 475 have_ib_ports = num_ib_ports(dev->dev); 476 477 props->fw_ver = dev->dev->caps.fw_ver; 478 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 479 IB_DEVICE_PORT_ACTIVE_EVENT | 480 IB_DEVICE_SYS_IMAGE_GUID | 481 IB_DEVICE_RC_RNR_NAK_GEN | 482 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 483 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) 484 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 485 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) 486 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 487 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) 488 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 489 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) 490 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; 
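/* The optional capability bits below (checksum offload, TSO, fast registration, memory windows, XRC, device-managed flow steering) are advertised only when the corresponding device capability flag is set. */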
491 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 492 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 493 if (dev->dev->caps.max_gso_sz && 494 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && 495 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) 496 props->device_cap_flags |= IB_DEVICE_UD_TSO; 497 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) 498 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; 499 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) && 500 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && 501 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) 502 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 503 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) 504 props->device_cap_flags |= IB_DEVICE_XRC; 505 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) 506 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW; 507 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { 508 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B) 509 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; 510 else 511 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; 512 } 513 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) 514 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; 515 516 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; 517 518 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 519 0xffffff; 520 props->vendor_part_id = dev->dev->persist->pdev->device; 521 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); 522 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 523 524 props->max_mr_size = ~0ull; 525 props->page_size_cap = dev->dev->caps.page_size_cap; 526 props->max_qp = dev->dev->quotas.qp; 527 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 528 props->max_sge = min(dev->dev->caps.max_sq_sg, 529 dev->dev->caps.max_rq_sg); 530 props->max_sge_rd = MLX4_MAX_SGE_RD; 531 props->max_cq = dev->dev->quotas.cq; 532 props->max_cqe = dev->dev->caps.max_cqes; 533 props->max_mr = dev->dev->quotas.mpt; 534 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; 535 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; 536 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; 537 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 538 props->max_srq = dev->dev->quotas.srq; 539 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; 540 props->max_srq_sge = dev->dev->caps.max_srq_sge; 541 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES; 542 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 543 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? 
544 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 545 props->masked_atomic_cap = props->atomic_cap; 546 props->max_pkeys = dev->dev->caps.pkey_table_len[1]; 547 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; 548 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; 549 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 550 props->max_mcast_grp; 551 props->max_map_per_fmr = dev->dev->caps.max_fmr_maps; 552 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; 553 props->timestamp_mask = 0xFFFFFFFFFFFFULL; 554 props->max_ah = INT_MAX; 555 556 if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) && 557 (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET || 558 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) { 559 props->rss_caps.max_rwq_indirection_tables = props->max_qp; 560 props->rss_caps.max_rwq_indirection_table_size = 561 dev->dev->caps.max_rss_tbl_sz; 562 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; 563 props->max_wq_type_rq = props->max_qp; 564 } 565 566 props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT; 567 props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD; 568 569 if (!mlx4_is_slave(dev->dev)) 570 err = mlx4_get_internal_clock_params(dev->dev, &clock_params); 571 572 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { 573 resp.response_length += sizeof(resp.hca_core_clock_offset); 574 if (!err && !mlx4_is_slave(dev->dev)) { 575 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; 576 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; 577 } 578 } 579 580 if (uhw->outlen >= resp.response_length + 581 sizeof(resp.max_inl_recv_sz)) { 582 resp.response_length += sizeof(resp.max_inl_recv_sz); 583 resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg * 584 sizeof(struct mlx4_wqe_data_seg); 585 } 586 587 if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) { 588 resp.response_length += sizeof(resp.rss_caps); 589 if (props->rss_caps.supported_qpts) { 590 resp.rss_caps.rx_hash_function = 591 MLX4_IB_RX_HASH_FUNC_TOEPLITZ; 592 resp.rss_caps.rx_hash_fields_mask = 593 MLX4_IB_RX_HASH_SRC_IPV4 | 594 MLX4_IB_RX_HASH_DST_IPV4 | 595 MLX4_IB_RX_HASH_SRC_IPV6 | 596 MLX4_IB_RX_HASH_DST_IPV6 | 597 MLX4_IB_RX_HASH_SRC_PORT_TCP | 598 MLX4_IB_RX_HASH_DST_PORT_TCP | 599 MLX4_IB_RX_HASH_SRC_PORT_UDP | 600 MLX4_IB_RX_HASH_DST_PORT_UDP; 601 } 602 } 603 604 if (uhw->outlen) { 605 err = ib_copy_to_udata(uhw, &resp, resp.response_length); 606 if (err) 607 goto out; 608 } 609 out: 610 kfree(in_mad); 611 kfree(out_mad); 612 613 return err; 614 } 615 616 static enum rdma_link_layer 617 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) 618 { 619 struct mlx4_dev *dev = to_mdev(device)->dev; 620 621 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? 
622 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; 623 } 624 625 static int ib_link_query_port(struct ib_device *ibdev, u8 port, 626 struct ib_port_attr *props, int netw_view) 627 { 628 struct ib_smp *in_mad = NULL; 629 struct ib_smp *out_mad = NULL; 630 int ext_active_speed; 631 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 632 int err = -ENOMEM; 633 634 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 635 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 636 if (!in_mad || !out_mad) 637 goto out; 638 639 init_query_mad(in_mad); 640 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 641 in_mad->attr_mod = cpu_to_be32(port); 642 643 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) 644 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 645 646 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, 647 in_mad, out_mad); 648 if (err) 649 goto out; 650 651 652 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); 653 props->lmc = out_mad->data[34] & 0x7; 654 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); 655 props->sm_sl = out_mad->data[36] & 0xf; 656 props->state = out_mad->data[32] & 0xf; 657 props->phys_state = out_mad->data[33] >> 4; 658 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); 659 if (netw_view) 660 props->gid_tbl_len = out_mad->data[50]; 661 else 662 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; 663 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; 664 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; 665 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); 666 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); 667 props->active_width = out_mad->data[31] & 0xf; 668 props->active_speed = out_mad->data[35] >> 4; 669 props->max_mtu = out_mad->data[41] & 0xf; 670 props->active_mtu = out_mad->data[36] >> 4; 671 props->subnet_timeout = out_mad->data[51] & 0x1f; 672 props->max_vl_num = out_mad->data[37] >> 4; 673 props->init_type_reply = out_mad->data[41] >> 4; 674 675 /* Check if extended speeds (EDR/FDR/...) are supported */ 676 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { 677 ext_active_speed = out_mad->data[62] >> 4; 678 679 switch (ext_active_speed) { 680 case 1: 681 props->active_speed = IB_SPEED_FDR; 682 break; 683 case 2: 684 props->active_speed = IB_SPEED_EDR; 685 break; 686 } 687 } 688 689 /* If reported active speed is QDR, check if is FDR-10 */ 690 if (props->active_speed == IB_SPEED_QDR) { 691 init_query_mad(in_mad); 692 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; 693 in_mad->attr_mod = cpu_to_be32(port); 694 695 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, 696 NULL, NULL, in_mad, out_mad); 697 if (err) 698 goto out; 699 700 /* Checking LinkSpeedActive for FDR-10 */ 701 if (out_mad->data[15] & 0x1) 702 props->active_speed = IB_SPEED_FDR10; 703 } 704 705 /* Avoid wrong speed value returned by FW if the IB link is down. */ 706 if (props->state == IB_PORT_DOWN) 707 props->active_speed = IB_SPEED_SDR; 708 709 out: 710 kfree(in_mad); 711 kfree(out_mad); 712 return err; 713 } 714 715 static u8 state_to_phys_state(enum ib_port_state state) 716 { 717 return state == IB_PORT_ACTIVE ? 
5 : 3; 718 } 719 720 static int eth_link_query_port(struct ib_device *ibdev, u8 port, 721 struct ib_port_attr *props) 722 { 723 724 struct mlx4_ib_dev *mdev = to_mdev(ibdev); 725 struct mlx4_ib_iboe *iboe = &mdev->iboe; 726 struct net_device *ndev; 727 enum ib_mtu tmp; 728 struct mlx4_cmd_mailbox *mailbox; 729 int err = 0; 730 int is_bonded = mlx4_is_bonded(mdev->dev); 731 732 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 733 if (IS_ERR(mailbox)) 734 return PTR_ERR(mailbox); 735 736 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, 737 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 738 MLX4_CMD_WRAPPED); 739 if (err) 740 goto out; 741 742 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || 743 (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? 744 IB_WIDTH_4X : IB_WIDTH_1X; 745 props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? 746 IB_SPEED_FDR : IB_SPEED_QDR; 747 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; 748 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; 749 props->max_msg_sz = mdev->dev->caps.max_msg_sz; 750 props->pkey_tbl_len = 1; 751 props->max_mtu = IB_MTU_4096; 752 props->max_vl_num = 2; 753 props->state = IB_PORT_DOWN; 754 props->phys_state = state_to_phys_state(props->state); 755 props->active_mtu = IB_MTU_256; 756 spin_lock_bh(&iboe->lock); 757 ndev = iboe->netdevs[port - 1]; 758 if (ndev && is_bonded) { 759 rcu_read_lock(); /* required to get upper dev */ 760 ndev = netdev_master_upper_dev_get_rcu(ndev); 761 rcu_read_unlock(); 762 } 763 if (!ndev) 764 goto out_unlock; 765 766 tmp = iboe_get_mtu(ndev->mtu); 767 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; 768 769 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? 770 IB_PORT_ACTIVE : IB_PORT_DOWN; 771 props->phys_state = state_to_phys_state(props->state); 772 out_unlock: 773 spin_unlock_bh(&iboe->lock); 774 out: 775 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 776 return err; 777 } 778 779 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port, 780 struct ib_port_attr *props, int netw_view) 781 { 782 int err; 783 784 /* props being zeroed by the caller, avoid zeroing it here */ 785 786 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? 
787 ib_link_query_port(ibdev, port, props, netw_view) : 788 eth_link_query_port(ibdev, port, props); 789 790 return err; 791 } 792 793 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, 794 struct ib_port_attr *props) 795 { 796 /* returns host view */ 797 return __mlx4_ib_query_port(ibdev, port, props, 0); 798 } 799 800 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 801 union ib_gid *gid, int netw_view) 802 { 803 struct ib_smp *in_mad = NULL; 804 struct ib_smp *out_mad = NULL; 805 int err = -ENOMEM; 806 struct mlx4_ib_dev *dev = to_mdev(ibdev); 807 int clear = 0; 808 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 809 810 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 811 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 812 if (!in_mad || !out_mad) 813 goto out; 814 815 init_query_mad(in_mad); 816 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 817 in_mad->attr_mod = cpu_to_be32(port); 818 819 if (mlx4_is_mfunc(dev->dev) && netw_view) 820 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 821 822 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); 823 if (err) 824 goto out; 825 826 memcpy(gid->raw, out_mad->data + 8, 8); 827 828 if (mlx4_is_mfunc(dev->dev) && !netw_view) { 829 if (index) { 830 /* For any index > 0, return the null guid */ 831 err = 0; 832 clear = 1; 833 goto out; 834 } 835 } 836 837 init_query_mad(in_mad); 838 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 839 in_mad->attr_mod = cpu_to_be32(index / 8); 840 841 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, 842 NULL, NULL, in_mad, out_mad); 843 if (err) 844 goto out; 845 846 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); 847 848 out: 849 if (clear) 850 memset(gid->raw + 8, 0, 8); 851 kfree(in_mad); 852 kfree(out_mad); 853 return err; 854 } 855 856 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 857 union ib_gid *gid) 858 { 859 int ret; 860 861 if (rdma_protocol_ib(ibdev, port)) 862 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); 863 864 if (!rdma_protocol_roce(ibdev, port)) 865 return -ENODEV; 866 867 if (!rdma_cap_roce_gid_table(ibdev, port)) 868 return -ENODEV; 869 870 ret = ib_get_cached_gid(ibdev, port, index, gid, NULL); 871 if (ret == -EAGAIN) { 872 memcpy(gid, &zgid, sizeof(*gid)); 873 return 0; 874 } 875 876 return ret; 877 } 878 879 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl) 880 { 881 union sl2vl_tbl_to_u64 sl2vl64; 882 struct ib_smp *in_mad = NULL; 883 struct ib_smp *out_mad = NULL; 884 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 885 int err = -ENOMEM; 886 int jj; 887 888 if (mlx4_is_slave(to_mdev(ibdev)->dev)) { 889 *sl2vl_tbl = 0; 890 return 0; 891 } 892 893 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); 894 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); 895 if (!in_mad || !out_mad) 896 goto out; 897 898 init_query_mad(in_mad); 899 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE; 900 in_mad->attr_mod = 0; 901 902 if (mlx4_is_mfunc(to_mdev(ibdev)->dev)) 903 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 904 905 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, 906 in_mad, out_mad); 907 if (err) 908 goto out; 909 910 for (jj = 0; jj < 8; jj++) 911 sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj]; 912 *sl2vl_tbl = sl2vl64.sl64; 913 914 out: 915 kfree(in_mad); 916 kfree(out_mad); 917 return err; 918 } 919 920 static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev) 921 { 922 u64 sl2vl; 923 int i; 924 int err; 925 926 for (i = 1; i <= mdev->dev->caps.num_ports; i++) { 927 if 
(mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 928 continue; 929 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl); 930 if (err) { 931 pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n", 932 i, err); 933 sl2vl = 0; 934 } 935 atomic64_set(&mdev->sl2vl[i - 1], sl2vl); 936 } 937 } 938 939 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 940 u16 *pkey, int netw_view) 941 { 942 struct ib_smp *in_mad = NULL; 943 struct ib_smp *out_mad = NULL; 944 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 945 int err = -ENOMEM; 946 947 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 948 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 949 if (!in_mad || !out_mad) 950 goto out; 951 952 init_query_mad(in_mad); 953 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 954 in_mad->attr_mod = cpu_to_be32(index / 32); 955 956 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) 957 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 958 959 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, 960 in_mad, out_mad); 961 if (err) 962 goto out; 963 964 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); 965 966 out: 967 kfree(in_mad); 968 kfree(out_mad); 969 return err; 970 } 971 972 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 973 { 974 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0); 975 } 976 977 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, 978 struct ib_device_modify *props) 979 { 980 struct mlx4_cmd_mailbox *mailbox; 981 unsigned long flags; 982 983 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) 984 return -EOPNOTSUPP; 985 986 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) 987 return 0; 988 989 if (mlx4_is_slave(to_mdev(ibdev)->dev)) 990 return -EOPNOTSUPP; 991 992 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); 993 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 994 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); 995 996 /* 997 * If possible, pass node desc to FW, so it can generate 998 * a 144 trap. If cmd fails, just ignore. 
999 */ 1000 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev); 1001 if (IS_ERR(mailbox)) 1002 return 0; 1003 1004 memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 1005 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, 1006 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1007 1008 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); 1009 1010 return 0; 1011 } 1012 1013 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, 1014 u32 cap_mask) 1015 { 1016 struct mlx4_cmd_mailbox *mailbox; 1017 int err; 1018 1019 mailbox = mlx4_alloc_cmd_mailbox(dev->dev); 1020 if (IS_ERR(mailbox)) 1021 return PTR_ERR(mailbox); 1022 1023 if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 1024 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; 1025 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); 1026 } else { 1027 ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols; 1028 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); 1029 } 1030 1031 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE, 1032 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 1033 MLX4_CMD_WRAPPED); 1034 1035 mlx4_free_cmd_mailbox(dev->dev, mailbox); 1036 return err; 1037 } 1038 1039 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 1040 struct ib_port_modify *props) 1041 { 1042 struct mlx4_ib_dev *mdev = to_mdev(ibdev); 1043 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; 1044 struct ib_port_attr attr; 1045 u32 cap_mask; 1046 int err; 1047 1048 /* return OK if this is RoCE. CM calls ib_modify_port() regardless 1049 * of whether port link layer is ETH or IB. For ETH ports, qkey 1050 * violations and port capabilities are not meaningful. 1051 */ 1052 if (is_eth) 1053 return 0; 1054 1055 mutex_lock(&mdev->cap_mask_mutex); 1056 1057 err = ib_query_port(ibdev, port, &attr); 1058 if (err) 1059 goto out; 1060 1061 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & 1062 ~props->clr_port_cap_mask; 1063 1064 err = mlx4_ib_SET_PORT(mdev, port, 1065 !!(mask & IB_PORT_RESET_QKEY_CNTR), 1066 cap_mask); 1067 1068 out: 1069 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); 1070 return err; 1071 } 1072 1073 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, 1074 struct ib_udata *udata) 1075 { 1076 struct mlx4_ib_dev *dev = to_mdev(ibdev); 1077 struct mlx4_ib_ucontext *context; 1078 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3; 1079 struct mlx4_ib_alloc_ucontext_resp resp; 1080 int err; 1081 1082 if (!dev->ib_active) 1083 return ERR_PTR(-EAGAIN); 1084 1085 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) { 1086 resp_v3.qp_tab_size = dev->dev->caps.num_qps; 1087 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size; 1088 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; 1089 } else { 1090 resp.dev_caps = dev->dev->caps.userspace_caps; 1091 resp.qp_tab_size = dev->dev->caps.num_qps; 1092 resp.bf_reg_size = dev->dev->caps.bf_reg_size; 1093 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; 1094 resp.cqe_size = dev->dev->caps.cqe_size; 1095 } 1096 1097 context = kzalloc(sizeof(*context), GFP_KERNEL); 1098 if (!context) 1099 return ERR_PTR(-ENOMEM); 1100 1101 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); 1102 if (err) { 1103 kfree(context); 1104 return ERR_PTR(err); 1105 } 1106 1107 INIT_LIST_HEAD(&context->db_page_list); 1108 mutex_init(&context->db_page_mutex); 1109 1110 INIT_LIST_HEAD(&context->wqn_ranges_list); 1111 mutex_init(&context->wqn_ranges_mutex); 1112 1113 if 
(ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) 1114 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3)); 1115 else 1116 err = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1117 1118 if (err) { 1119 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); 1120 kfree(context); 1121 return ERR_PTR(-EFAULT); 1122 } 1123 1124 return &context->ibucontext; 1125 } 1126 1127 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 1128 { 1129 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext); 1130 1131 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar); 1132 kfree(context); 1133 1134 return 0; 1135 } 1136 1137 static void mlx4_ib_vma_open(struct vm_area_struct *area) 1138 { 1139 /* vma_open is called when a new VMA is created on top of our VMA. 1140 * This is done through either mremap flow or split_vma (usually due 1141 * to mlock, madvise, munmap, etc.). We do not support a clone of the 1142 * vma, as this VMA is strongly hardware related. Therefore we set the 1143 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from 1144 * calling us again and trying to do incorrect actions. We assume that 1145 * the original vma is exactly a single page, so there will be no 1146 * "splitting" operations on it. 1147 */ 1148 area->vm_ops = NULL; 1149 } 1150 1151 static void mlx4_ib_vma_close(struct vm_area_struct *area) 1152 { 1153 struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data; 1154 1155 /* It's guaranteed that all VMAs opened on a FD are closed before the 1156 * file itself is closed, therefore no sync is needed with the regular 1157 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, a sync is needed 1158 * with accessing the vma as part of mlx4_ib_disassociate_ucontext. 1159 * The close operation is usually called under mm->mmap_sem except when 1160 * the process is exiting. The exiting case is handled explicitly as part 1161 * of mlx4_ib_disassociate_ucontext. 1162 */ 1163 mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *) 1164 area->vm_private_data; 1165 1166 /* Set the vma context pointer to NULL in the mlx4_ib driver's private 1167 * data to protect against a race condition in mlx4_ib_disassociate_ucontext(). 1168 */ 1169 mlx4_ib_vma_priv_data->vma = NULL; 1170 } 1171 1172 static const struct vm_operations_struct mlx4_ib_vm_ops = { 1173 .open = mlx4_ib_vma_open, 1174 .close = mlx4_ib_vma_close 1175 }; 1176 1177 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) 1178 { 1179 int i; 1180 int ret = 0; 1181 struct vm_area_struct *vma; 1182 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext); 1183 struct task_struct *owning_process = NULL; 1184 struct mm_struct *owning_mm = NULL; 1185 1186 owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); 1187 if (!owning_process) 1188 return; 1189 1190 owning_mm = get_task_mm(owning_process); 1191 if (!owning_mm) { 1192 pr_info("no mm, disassociate ucontext is pending task termination\n"); 1193 while (1) { 1194 /* make sure that the task is dead before returning; this prevents 1195 * a rare case of the module going down in parallel to a 1196 * call to mlx4_ib_vma_close.
1197 */ 1198 put_task_struct(owning_process); 1199 usleep_range(1000, 2000); 1200 owning_process = get_pid_task(ibcontext->tgid, 1201 PIDTYPE_PID); 1202 if (!owning_process || 1203 owning_process->state == TASK_DEAD) { 1204 pr_info("disassociate ucontext done, task was terminated\n"); 1205 /* in case the task was dead, we need to release the task struct */ 1206 if (owning_process) 1207 put_task_struct(owning_process); 1208 return; 1209 } 1210 } 1211 } 1212 1213 /* need to protect from a race on closing the vma as part of 1214 * mlx4_ib_vma_close(). 1215 */ 1216 down_write(&owning_mm->mmap_sem); 1217 for (i = 0; i < HW_BAR_COUNT; i++) { 1218 vma = context->hw_bar_info[i].vma; 1219 if (!vma) 1220 continue; 1221 1222 ret = zap_vma_ptes(context->hw_bar_info[i].vma, 1223 context->hw_bar_info[i].vma->vm_start, 1224 PAGE_SIZE); 1225 if (ret) { 1226 pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret); 1227 BUG_ON(1); 1228 } 1229 1230 context->hw_bar_info[i].vma->vm_flags &= 1231 ~(VM_SHARED | VM_MAYSHARE); 1232 /* context is going to be destroyed; ops should not be accessed any more */ 1233 context->hw_bar_info[i].vma->vm_ops = NULL; 1234 } 1235 1236 up_write(&owning_mm->mmap_sem); 1237 mmput(owning_mm); 1238 put_task_struct(owning_process); 1239 } 1240 1241 static void mlx4_ib_set_vma_data(struct vm_area_struct *vma, 1242 struct mlx4_ib_vma_private_data *vma_private_data) 1243 { 1244 vma_private_data->vma = vma; 1245 vma->vm_private_data = vma_private_data; 1246 vma->vm_ops = &mlx4_ib_vm_ops; 1247 } 1248 1249 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 1250 { 1251 struct mlx4_ib_dev *dev = to_mdev(context->device); 1252 struct mlx4_ib_ucontext *mucontext = to_mucontext(context); 1253 1254 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1255 return -EINVAL; 1256 1257 if (vma->vm_pgoff == 0) { 1258 /* We prevent double mmapping on the same context */ 1259 if (mucontext->hw_bar_info[HW_BAR_DB].vma) 1260 return -EINVAL; 1261 1262 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1263 1264 if (io_remap_pfn_range(vma, vma->vm_start, 1265 to_mucontext(context)->uar.pfn, 1266 PAGE_SIZE, vma->vm_page_prot)) 1267 return -EAGAIN; 1268 1269 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]); 1270 1271 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { 1272 /* We prevent double mmapping on the same context */ 1273 if (mucontext->hw_bar_info[HW_BAR_BF].vma) 1274 return -EINVAL; 1275 1276 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1277 1278 if (io_remap_pfn_range(vma, vma->vm_start, 1279 to_mucontext(context)->uar.pfn + 1280 dev->dev->caps.num_uars, 1281 PAGE_SIZE, vma->vm_page_prot)) 1282 return -EAGAIN; 1283 1284 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]); 1285 1286 } else if (vma->vm_pgoff == 3) { 1287 struct mlx4_clock_params params; 1288 int ret; 1289 1290 /* We prevent double mmapping on the same context */ 1291 if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma) 1292 return -EINVAL; 1293 1294 ret = mlx4_get_internal_clock_params(dev->dev, &params); 1295 1296 if (ret) 1297 return ret; 1298 1299 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1300 if (io_remap_pfn_range(vma, vma->vm_start, 1301 (pci_resource_start(dev->dev->persist->pdev, 1302 params.bar) + 1303 params.offset) 1304 >> PAGE_SHIFT, 1305 PAGE_SIZE, vma->vm_page_prot)) 1306 return -EAGAIN; 1307 1308 mlx4_ib_set_vma_data(vma, 1309 &mucontext->hw_bar_info[HW_BAR_CLOCK]); 1310 } else { 1311 return -EINVAL; 1312 } 1313 1314 return 0; 1315 } 1316 1317 static
struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev, 1318 struct ib_ucontext *context, 1319 struct ib_udata *udata) 1320 { 1321 struct mlx4_ib_pd *pd; 1322 int err; 1323 1324 pd = kmalloc(sizeof *pd, GFP_KERNEL); 1325 if (!pd) 1326 return ERR_PTR(-ENOMEM); 1327 1328 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); 1329 if (err) { 1330 kfree(pd); 1331 return ERR_PTR(err); 1332 } 1333 1334 if (context) 1335 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) { 1336 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); 1337 kfree(pd); 1338 return ERR_PTR(-EFAULT); 1339 } 1340 1341 return &pd->ibpd; 1342 } 1343 1344 static int mlx4_ib_dealloc_pd(struct ib_pd *pd) 1345 { 1346 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); 1347 kfree(pd); 1348 1349 return 0; 1350 } 1351 1352 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, 1353 struct ib_ucontext *context, 1354 struct ib_udata *udata) 1355 { 1356 struct mlx4_ib_xrcd *xrcd; 1357 struct ib_cq_init_attr cq_attr = {}; 1358 int err; 1359 1360 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) 1361 return ERR_PTR(-ENOSYS); 1362 1363 xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL); 1364 if (!xrcd) 1365 return ERR_PTR(-ENOMEM); 1366 1367 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); 1368 if (err) 1369 goto err1; 1370 1371 xrcd->pd = ib_alloc_pd(ibdev, 0); 1372 if (IS_ERR(xrcd->pd)) { 1373 err = PTR_ERR(xrcd->pd); 1374 goto err2; 1375 } 1376 1377 cq_attr.cqe = 1; 1378 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); 1379 if (IS_ERR(xrcd->cq)) { 1380 err = PTR_ERR(xrcd->cq); 1381 goto err3; 1382 } 1383 1384 return &xrcd->ibxrcd; 1385 1386 err3: 1387 ib_dealloc_pd(xrcd->pd); 1388 err2: 1389 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); 1390 err1: 1391 kfree(xrcd); 1392 return ERR_PTR(err); 1393 } 1394 1395 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd) 1396 { 1397 ib_destroy_cq(to_mxrcd(xrcd)->cq); 1398 ib_dealloc_pd(to_mxrcd(xrcd)->pd); 1399 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); 1400 kfree(xrcd); 1401 1402 return 0; 1403 } 1404 1405 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) 1406 { 1407 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1408 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 1409 struct mlx4_ib_gid_entry *ge; 1410 1411 ge = kzalloc(sizeof *ge, GFP_KERNEL); 1412 if (!ge) 1413 return -ENOMEM; 1414 1415 ge->gid = *gid; 1416 if (mlx4_ib_add_mc(mdev, mqp, gid)) { 1417 ge->port = mqp->port; 1418 ge->added = 1; 1419 } 1420 1421 mutex_lock(&mqp->mutex); 1422 list_add_tail(&ge->list, &mqp->gid_list); 1423 mutex_unlock(&mqp->mutex); 1424 1425 return 0; 1426 } 1427 1428 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev, 1429 struct mlx4_ib_counters *ctr_table) 1430 { 1431 struct counter_index *counter, *tmp_count; 1432 1433 mutex_lock(&ctr_table->mutex); 1434 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list, 1435 list) { 1436 if (counter->allocated) 1437 mlx4_counter_free(ibdev->dev, counter->index); 1438 list_del(&counter->list); 1439 kfree(counter); 1440 } 1441 mutex_unlock(&ctr_table->mutex); 1442 } 1443 1444 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 1445 union ib_gid *gid) 1446 { 1447 struct net_device *ndev; 1448 int ret = 0; 1449 1450 if (!mqp->port) 1451 return 0; 1452 1453 spin_lock_bh(&mdev->iboe.lock); 1454 ndev = mdev->iboe.netdevs[mqp->port - 1]; 1455 if (ndev) 1456 dev_hold(ndev); 1457 spin_unlock_bh(&mdev->iboe.lock); 1458 1459 if (ndev) { 1460 ret = 1; 1461 
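/* The reference taken under iboe.lock above was only needed to confirm that the port has a netdevice; drop it before returning. */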
dev_put(ndev); 1462 } 1463 1464 return ret; 1465 } 1466 1467 struct mlx4_ib_steering { 1468 struct list_head list; 1469 struct mlx4_flow_reg_id reg_id; 1470 union ib_gid gid; 1471 }; 1472 1473 #define LAST_ETH_FIELD vlan_tag 1474 #define LAST_IB_FIELD sl 1475 #define LAST_IPV4_FIELD dst_ip 1476 #define LAST_TCP_UDP_FIELD src_port 1477 1478 /* Field is the last supported field */ 1479 #define FIELDS_NOT_SUPPORTED(filter, field)\ 1480 memchr_inv((void *)&filter.field +\ 1481 sizeof(filter.field), 0,\ 1482 sizeof(filter) -\ 1483 offsetof(typeof(filter), field) -\ 1484 sizeof(filter.field)) 1485 1486 static int parse_flow_attr(struct mlx4_dev *dev, 1487 u32 qp_num, 1488 union ib_flow_spec *ib_spec, 1489 struct _rule_hw *mlx4_spec) 1490 { 1491 enum mlx4_net_trans_rule_id type; 1492 1493 switch (ib_spec->type) { 1494 case IB_FLOW_SPEC_ETH: 1495 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 1496 return -ENOTSUPP; 1497 1498 type = MLX4_NET_TRANS_RULE_ID_ETH; 1499 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac, 1500 ETH_ALEN); 1501 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac, 1502 ETH_ALEN); 1503 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag; 1504 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag; 1505 break; 1506 case IB_FLOW_SPEC_IB: 1507 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD)) 1508 return -ENOTSUPP; 1509 1510 type = MLX4_NET_TRANS_RULE_ID_IB; 1511 mlx4_spec->ib.l3_qpn = 1512 cpu_to_be32(qp_num); 1513 mlx4_spec->ib.qpn_mask = 1514 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK); 1515 break; 1516 1517 1518 case IB_FLOW_SPEC_IPV4: 1519 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 1520 return -ENOTSUPP; 1521 1522 type = MLX4_NET_TRANS_RULE_ID_IPV4; 1523 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip; 1524 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip; 1525 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip; 1526 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip; 1527 break; 1528 1529 case IB_FLOW_SPEC_TCP: 1530 case IB_FLOW_SPEC_UDP: 1531 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD)) 1532 return -ENOTSUPP; 1533 1534 type = ib_spec->type == IB_FLOW_SPEC_TCP ? 
1535 MLX4_NET_TRANS_RULE_ID_TCP : 1536 MLX4_NET_TRANS_RULE_ID_UDP; 1537 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port; 1538 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port; 1539 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port; 1540 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port; 1541 break; 1542 1543 default: 1544 return -EINVAL; 1545 } 1546 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 || 1547 mlx4_hw_rule_sz(dev, type) < 0) 1548 return -EINVAL; 1549 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type)); 1550 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2; 1551 return mlx4_hw_rule_sz(dev, type); 1552 } 1553 1554 struct default_rules { 1555 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; 1556 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; 1557 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS]; 1558 __u8 link_layer; 1559 }; 1560 static const struct default_rules default_table[] = { 1561 { 1562 .mandatory_fields = {IB_FLOW_SPEC_IPV4}, 1563 .mandatory_not_fields = {IB_FLOW_SPEC_ETH}, 1564 .rules_create_list = {IB_FLOW_SPEC_IB}, 1565 .link_layer = IB_LINK_LAYER_INFINIBAND 1566 } 1567 }; 1568 1569 static int __mlx4_ib_default_rules_match(struct ib_qp *qp, 1570 struct ib_flow_attr *flow_attr) 1571 { 1572 int i, j, k; 1573 void *ib_flow; 1574 const struct default_rules *pdefault_rules = default_table; 1575 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); 1576 1577 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) { 1578 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; 1579 memset(&field_types, 0, sizeof(field_types)); 1580 1581 if (link_layer != pdefault_rules->link_layer) 1582 continue; 1583 1584 ib_flow = flow_attr + 1; 1585 /* we assume the specs are sorted */ 1586 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS && 1587 j < flow_attr->num_of_specs; k++) { 1588 union ib_flow_spec *current_flow = 1589 (union ib_flow_spec *)ib_flow; 1590 1591 /* same layer but different type */ 1592 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) == 1593 (pdefault_rules->mandatory_fields[k] & 1594 IB_FLOW_SPEC_LAYER_MASK)) && 1595 (current_flow->type != 1596 pdefault_rules->mandatory_fields[k])) 1597 goto out; 1598 1599 /* same layer, try match next one */ 1600 if (current_flow->type == 1601 pdefault_rules->mandatory_fields[k]) { 1602 j++; 1603 ib_flow += 1604 ((union ib_flow_spec *)ib_flow)->size; 1605 } 1606 } 1607 1608 ib_flow = flow_attr + 1; 1609 for (j = 0; j < flow_attr->num_of_specs; 1610 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size) 1611 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++) 1612 /* same layer and same type */ 1613 if (((union ib_flow_spec *)ib_flow)->type == 1614 pdefault_rules->mandatory_not_fields[k]) 1615 goto out; 1616 1617 return i; 1618 } 1619 out: 1620 return -1; 1621 } 1622 1623 static int __mlx4_ib_create_default_rules( 1624 struct mlx4_ib_dev *mdev, 1625 struct ib_qp *qp, 1626 const struct default_rules *pdefault_rules, 1627 struct _rule_hw *mlx4_spec) { 1628 int size = 0; 1629 int i; 1630 1631 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { 1632 int ret; 1633 union ib_flow_spec ib_spec; 1634 switch (pdefault_rules->rules_create_list[i]) { 1635 case 0: 1636 /* no rule */ 1637 continue; 1638 case IB_FLOW_SPEC_IB: 1639 ib_spec.type = IB_FLOW_SPEC_IB; 1640 ib_spec.size = sizeof(struct ib_flow_spec_ib); 1641 1642 break; 1643 default: 1644 /* invalid rule */ 1645 return -EINVAL; 1646 } 1647 /* We must put empty 
rule, qpn is being ignored */ 1648 ret = parse_flow_attr(mdev->dev, 0, &ib_spec, 1649 mlx4_spec); 1650 if (ret < 0) { 1651 pr_info("invalid parsing\n"); 1652 return -EINVAL; 1653 } 1654 1655 mlx4_spec = (void *)mlx4_spec + ret; 1656 size += ret; 1657 } 1658 return size; 1659 } 1660 1661 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, 1662 int domain, 1663 enum mlx4_net_trans_promisc_mode flow_type, 1664 u64 *reg_id) 1665 { 1666 int ret, i; 1667 int size = 0; 1668 void *ib_flow; 1669 struct mlx4_ib_dev *mdev = to_mdev(qp->device); 1670 struct mlx4_cmd_mailbox *mailbox; 1671 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 1672 int default_flow; 1673 1674 static const u16 __mlx4_domain[] = { 1675 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS, 1676 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL, 1677 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS, 1678 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC, 1679 }; 1680 1681 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) { 1682 pr_err("Invalid priority value %d\n", flow_attr->priority); 1683 return -EINVAL; 1684 } 1685 1686 if (domain >= IB_FLOW_DOMAIN_NUM) { 1687 pr_err("Invalid domain value %d\n", domain); 1688 return -EINVAL; 1689 } 1690 1691 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0) 1692 return -EINVAL; 1693 1694 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 1695 if (IS_ERR(mailbox)) 1696 return PTR_ERR(mailbox); 1697 ctrl = mailbox->buf; 1698 1699 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] | 1700 flow_attr->priority); 1701 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type); 1702 ctrl->port = flow_attr->port; 1703 ctrl->qpn = cpu_to_be32(qp->qp_num); 1704 1705 ib_flow = flow_attr + 1; 1706 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 1707 /* Add default flows */ 1708 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); 1709 if (default_flow >= 0) { 1710 ret = __mlx4_ib_create_default_rules( 1711 mdev, qp, default_table + default_flow, 1712 mailbox->buf + size); 1713 if (ret < 0) { 1714 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 1715 return -EINVAL; 1716 } 1717 size += ret; 1718 } 1719 for (i = 0; i < flow_attr->num_of_specs; i++) { 1720 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, 1721 mailbox->buf + size); 1722 if (ret < 0) { 1723 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 1724 return -EINVAL; 1725 } 1726 ib_flow += ((union ib_flow_spec *) ib_flow)->size; 1727 size += ret; 1728 } 1729 1730 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR && 1731 flow_attr->num_of_specs == 1) { 1732 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1); 1733 enum ib_flow_spec_type header_spec = 1734 ((union ib_flow_spec *)(flow_attr + 1))->type; 1735 1736 if (header_spec == IB_FLOW_SPEC_ETH) 1737 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header); 1738 } 1739 1740 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, 1741 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 1742 MLX4_CMD_NATIVE); 1743 if (ret == -ENOMEM) 1744 pr_err("mcg table is full. Fail to register network rule.\n"); 1745 else if (ret == -ENXIO) 1746 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n"); 1747 else if (ret) 1748 pr_err("Invalid argument. 
Fail to register network rule.\n"); 1749 1750 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 1751 return ret; 1752 } 1753 1754 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) 1755 { 1756 int err; 1757 err = mlx4_cmd(dev, reg_id, 0, 0, 1758 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 1759 MLX4_CMD_NATIVE); 1760 if (err) 1761 pr_err("Fail to detach network rule. registration id = 0x%llx\n", 1762 reg_id); 1763 return err; 1764 } 1765 1766 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, 1767 u64 *reg_id) 1768 { 1769 void *ib_flow; 1770 union ib_flow_spec *ib_spec; 1771 struct mlx4_dev *dev = to_mdev(qp->device)->dev; 1772 int err = 0; 1773 1774 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 1775 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 1776 return 0; /* do nothing */ 1777 1778 ib_flow = flow_attr + 1; 1779 ib_spec = (union ib_flow_spec *)ib_flow; 1780 1781 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) 1782 return 0; /* do nothing */ 1783 1784 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, 1785 flow_attr->port, qp->qp_num, 1786 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff), 1787 reg_id); 1788 return err; 1789 } 1790 1791 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev, 1792 struct ib_flow_attr *flow_attr, 1793 enum mlx4_net_trans_promisc_mode *type) 1794 { 1795 int err = 0; 1796 1797 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) || 1798 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) || 1799 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) { 1800 return -EOPNOTSUPP; 1801 } 1802 1803 if (flow_attr->num_of_specs == 0) { 1804 type[0] = MLX4_FS_MC_SNIFFER; 1805 type[1] = MLX4_FS_UC_SNIFFER; 1806 } else { 1807 union ib_flow_spec *ib_spec; 1808 1809 ib_spec = (union ib_flow_spec *)(flow_attr + 1); 1810 if (ib_spec->type != IB_FLOW_SPEC_ETH) 1811 return -EINVAL; 1812 1813 /* if all is zero then MC and UC */ 1814 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) { 1815 type[0] = MLX4_FS_MC_SNIFFER; 1816 type[1] = MLX4_FS_UC_SNIFFER; 1817 } else { 1818 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01, 1819 ib_spec->eth.mask.dst_mac[1], 1820 ib_spec->eth.mask.dst_mac[2], 1821 ib_spec->eth.mask.dst_mac[3], 1822 ib_spec->eth.mask.dst_mac[4], 1823 ib_spec->eth.mask.dst_mac[5]}; 1824 1825 /* The above xor was only on the MC bit; a non-empty mask is valid 1826 * only if this bit is set and the rest are zero.
1827 */ 1828 if (!is_zero_ether_addr(&mac[0])) 1829 return -EINVAL; 1830 1831 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac)) 1832 type[0] = MLX4_FS_MC_SNIFFER; 1833 else 1834 type[0] = MLX4_FS_UC_SNIFFER; 1835 } 1836 } 1837 1838 return err; 1839 } 1840 1841 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, 1842 struct ib_flow_attr *flow_attr, 1843 int domain) 1844 { 1845 int err = 0, i = 0, j = 0; 1846 struct mlx4_ib_flow *mflow; 1847 enum mlx4_net_trans_promisc_mode type[2]; 1848 struct mlx4_dev *dev = (to_mdev(qp->device))->dev; 1849 int is_bonded = mlx4_is_bonded(dev); 1850 1851 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt) 1852 return ERR_PTR(-EINVAL); 1853 1854 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && 1855 (flow_attr->type != IB_FLOW_ATTR_NORMAL)) 1856 return ERR_PTR(-EOPNOTSUPP); 1857 1858 memset(type, 0, sizeof(type)); 1859 1860 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL); 1861 if (!mflow) { 1862 err = -ENOMEM; 1863 goto err_free; 1864 } 1865 1866 switch (flow_attr->type) { 1867 case IB_FLOW_ATTR_NORMAL: 1868 /* If the dont-trap flag (continue match) is set, under specific 1869 * conditions traffic is replicated to the given qp, 1870 * without stealing it 1871 */ 1872 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) { 1873 err = mlx4_ib_add_dont_trap_rule(dev, 1874 flow_attr, 1875 type); 1876 if (err) 1877 goto err_free; 1878 } else { 1879 type[0] = MLX4_FS_REGULAR; 1880 } 1881 break; 1882 1883 case IB_FLOW_ATTR_ALL_DEFAULT: 1884 type[0] = MLX4_FS_ALL_DEFAULT; 1885 break; 1886 1887 case IB_FLOW_ATTR_MC_DEFAULT: 1888 type[0] = MLX4_FS_MC_DEFAULT; 1889 break; 1890 1891 case IB_FLOW_ATTR_SNIFFER: 1892 type[0] = MLX4_FS_MIRROR_RX_PORT; 1893 type[1] = MLX4_FS_MIRROR_SX_PORT; 1894 break; 1895 1896 default: 1897 err = -EINVAL; 1898 goto err_free; 1899 } 1900 1901 while (i < ARRAY_SIZE(type) && type[i]) { 1902 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], 1903 &mflow->reg_id[i].id); 1904 if (err) 1905 goto err_create_flow; 1906 if (is_bonded) { 1907 /* Application always sees one port so the mirror rule 1908 * must be on port #2 1909 */ 1910 flow_attr->port = 2; 1911 err = __mlx4_ib_create_flow(qp, flow_attr, 1912 domain, type[j], 1913 &mflow->reg_id[j].mirror); 1914 flow_attr->port = 1; 1915 if (err) 1916 goto err_create_flow; 1917 j++; 1918 } 1919 1920 i++; 1921 } 1922 1923 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1924 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, 1925 &mflow->reg_id[i].id); 1926 if (err) 1927 goto err_create_flow; 1928 1929 if (is_bonded) { 1930 flow_attr->port = 2; 1931 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, 1932 &mflow->reg_id[j].mirror); 1933 flow_attr->port = 1; 1934 if (err) 1935 goto err_create_flow; 1936 j++; 1937 } 1938 /* function to create mirror rule */ 1939 i++; 1940 } 1941 1942 return &mflow->ibflow; 1943 1944 err_create_flow: 1945 while (i) { 1946 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, 1947 mflow->reg_id[i].id); 1948 i--; 1949 } 1950 1951 while (j) { 1952 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, 1953 mflow->reg_id[j].mirror); 1954 j--; 1955 } 1956 err_free: 1957 kfree(mflow); 1958 return ERR_PTR(err); 1959 } 1960 1961 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id) 1962 { 1963 int err, ret = 0; 1964 int i = 0; 1965 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); 1966 struct mlx4_ib_flow *mflow = to_mflow(flow_id); 1967 1968 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) { 1969 err =
__mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id); 1970 if (err) 1971 ret = err; 1972 if (mflow->reg_id[i].mirror) { 1973 err = __mlx4_ib_destroy_flow(mdev->dev, 1974 mflow->reg_id[i].mirror); 1975 if (err) 1976 ret = err; 1977 } 1978 i++; 1979 } 1980 1981 kfree(mflow); 1982 return ret; 1983 } 1984 1985 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 1986 { 1987 int err; 1988 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 1989 struct mlx4_dev *dev = mdev->dev; 1990 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1991 struct mlx4_ib_steering *ib_steering = NULL; 1992 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; 1993 struct mlx4_flow_reg_id reg_id; 1994 1995 if (mdev->dev->caps.steering_mode == 1996 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1997 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL); 1998 if (!ib_steering) 1999 return -ENOMEM; 2000 } 2001 2002 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, 2003 !!(mqp->flags & 2004 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 2005 prot, &reg_id.id); 2006 if (err) { 2007 pr_err("multicast attach op failed, err %d\n", err); 2008 goto err_malloc; 2009 } 2010 2011 reg_id.mirror = 0; 2012 if (mlx4_is_bonded(dev)) { 2013 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2014 (mqp->port == 1) ? 2 : 1, 2015 !!(mqp->flags & 2016 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 2017 prot, &reg_id.mirror); 2018 if (err) 2019 goto err_add; 2020 } 2021 2022 err = add_gid_entry(ibqp, gid); 2023 if (err) 2024 goto err_add; 2025 2026 if (ib_steering) { 2027 memcpy(ib_steering->gid.raw, gid->raw, 16); 2028 ib_steering->reg_id = reg_id; 2029 mutex_lock(&mqp->mutex); 2030 list_add(&ib_steering->list, &mqp->steering_rules); 2031 mutex_unlock(&mqp->mutex); 2032 } 2033 return 0; 2034 2035 err_add: 2036 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 2037 prot, reg_id.id); 2038 if (reg_id.mirror) 2039 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 2040 prot, reg_id.mirror); 2041 err_malloc: 2042 kfree(ib_steering); 2043 2044 return err; 2045 } 2046 2047 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) 2048 { 2049 struct mlx4_ib_gid_entry *ge; 2050 struct mlx4_ib_gid_entry *tmp; 2051 struct mlx4_ib_gid_entry *ret = NULL; 2052 2053 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { 2054 if (!memcmp(raw, ge->gid.raw, 16)) { 2055 ret = ge; 2056 break; 2057 } 2058 } 2059 2060 return ret; 2061 } 2062 2063 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2064 { 2065 int err; 2066 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 2067 struct mlx4_dev *dev = mdev->dev; 2068 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 2069 struct net_device *ndev; 2070 struct mlx4_ib_gid_entry *ge; 2071 struct mlx4_flow_reg_id reg_id = {0, 0}; 2072 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; 2073 2074 if (mdev->dev->caps.steering_mode == 2075 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2076 struct mlx4_ib_steering *ib_steering; 2077 2078 mutex_lock(&mqp->mutex); 2079 list_for_each_entry(ib_steering, &mqp->steering_rules, list) { 2080 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) { 2081 list_del(&ib_steering->list); 2082 break; 2083 } 2084 } 2085 mutex_unlock(&mqp->mutex); 2086 if (&ib_steering->list == &mqp->steering_rules) { 2087 pr_err("Couldn't find reg_id for mgid. 
Steering rule is left attached\n"); 2088 return -EINVAL; 2089 } 2090 reg_id = ib_steering->reg_id; 2091 kfree(ib_steering); 2092 } 2093 2094 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 2095 prot, reg_id.id); 2096 if (err) 2097 return err; 2098 2099 if (mlx4_is_bonded(dev)) { 2100 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 2101 prot, reg_id.mirror); 2102 if (err) 2103 return err; 2104 } 2105 2106 mutex_lock(&mqp->mutex); 2107 ge = find_gid_entry(mqp, gid->raw); 2108 if (ge) { 2109 spin_lock_bh(&mdev->iboe.lock); 2110 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; 2111 if (ndev) 2112 dev_hold(ndev); 2113 spin_unlock_bh(&mdev->iboe.lock); 2114 if (ndev) 2115 dev_put(ndev); 2116 list_del(&ge->list); 2117 kfree(ge); 2118 } else 2119 pr_warn("could not find mgid entry\n"); 2120 2121 mutex_unlock(&mqp->mutex); 2122 2123 return 0; 2124 } 2125 2126 static int init_node_data(struct mlx4_ib_dev *dev) 2127 { 2128 struct ib_smp *in_mad = NULL; 2129 struct ib_smp *out_mad = NULL; 2130 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 2131 int err = -ENOMEM; 2132 2133 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 2134 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 2135 if (!in_mad || !out_mad) 2136 goto out; 2137 2138 init_query_mad(in_mad); 2139 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 2140 if (mlx4_is_master(dev->dev)) 2141 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 2142 2143 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); 2144 if (err) 2145 goto out; 2146 2147 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); 2148 2149 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 2150 2151 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); 2152 if (err) 2153 goto out; 2154 2155 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); 2156 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 2157 2158 out: 2159 kfree(in_mad); 2160 kfree(out_mad); 2161 return err; 2162 } 2163 2164 static ssize_t show_hca(struct device *device, struct device_attribute *attr, 2165 char *buf) 2166 { 2167 struct mlx4_ib_dev *dev = 2168 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2169 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); 2170 } 2171 2172 static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2173 char *buf) 2174 { 2175 struct mlx4_ib_dev *dev = 2176 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2177 return sprintf(buf, "%x\n", dev->dev->rev_id); 2178 } 2179 2180 static ssize_t show_board(struct device *device, struct device_attribute *attr, 2181 char *buf) 2182 { 2183 struct mlx4_ib_dev *dev = 2184 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2185 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN, 2186 dev->dev->board_id); 2187 } 2188 2189 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2190 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2191 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2192 2193 static struct device_attribute *mlx4_class_attributes[] = { 2194 &dev_attr_hw_rev, 2195 &dev_attr_hca_type, 2196 &dev_attr_board_id 2197 }; 2198 2199 struct diag_counter { 2200 const char *name; 2201 u32 offset; 2202 }; 2203 2204 #define DIAG_COUNTER(_name, _offset) \ 2205 { .name = #_name, .offset = _offset } 2206 2207 static const struct diag_counter diag_basic[] = { 2208 DIAG_COUNTER(rq_num_lle, 0x00), 2209 DIAG_COUNTER(sq_num_lle, 0x04), 2210 DIAG_COUNTER(rq_num_lqpoe, 0x08), 2211 DIAG_COUNTER(sq_num_lqpoe, 0x0C), 2212 
DIAG_COUNTER(rq_num_lpe, 0x18), 2213 DIAG_COUNTER(sq_num_lpe, 0x1C), 2214 DIAG_COUNTER(rq_num_wrfe, 0x20), 2215 DIAG_COUNTER(sq_num_wrfe, 0x24), 2216 DIAG_COUNTER(sq_num_mwbe, 0x2C), 2217 DIAG_COUNTER(sq_num_bre, 0x34), 2218 DIAG_COUNTER(sq_num_rire, 0x44), 2219 DIAG_COUNTER(rq_num_rire, 0x48), 2220 DIAG_COUNTER(sq_num_rae, 0x4C), 2221 DIAG_COUNTER(rq_num_rae, 0x50), 2222 DIAG_COUNTER(sq_num_roe, 0x54), 2223 DIAG_COUNTER(sq_num_tree, 0x5C), 2224 DIAG_COUNTER(sq_num_rree, 0x64), 2225 DIAG_COUNTER(rq_num_rnr, 0x68), 2226 DIAG_COUNTER(sq_num_rnr, 0x6C), 2227 DIAG_COUNTER(rq_num_oos, 0x100), 2228 DIAG_COUNTER(sq_num_oos, 0x104), 2229 }; 2230 2231 static const struct diag_counter diag_ext[] = { 2232 DIAG_COUNTER(rq_num_dup, 0x130), 2233 DIAG_COUNTER(sq_num_to, 0x134), 2234 }; 2235 2236 static const struct diag_counter diag_device_only[] = { 2237 DIAG_COUNTER(num_cqovf, 0x1A0), 2238 DIAG_COUNTER(rq_num_udsdprd, 0x118), 2239 }; 2240 2241 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev, 2242 u8 port_num) 2243 { 2244 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2245 struct mlx4_ib_diag_counters *diag = dev->diag_counters; 2246 2247 if (!diag[!!port_num].name) 2248 return NULL; 2249 2250 return rdma_alloc_hw_stats_struct(diag[!!port_num].name, 2251 diag[!!port_num].num_counters, 2252 RDMA_HW_STATS_DEFAULT_LIFESPAN); 2253 } 2254 2255 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, 2256 struct rdma_hw_stats *stats, 2257 u8 port, int index) 2258 { 2259 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2260 struct mlx4_ib_diag_counters *diag = dev->diag_counters; 2261 u32 hw_value[ARRAY_SIZE(diag_device_only) + 2262 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {}; 2263 int ret; 2264 int i; 2265 2266 ret = mlx4_query_diag_counters(dev->dev, 2267 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS, 2268 diag[!!port].offset, hw_value, 2269 diag[!!port].num_counters, port); 2270 2271 if (ret) 2272 return ret; 2273 2274 for (i = 0; i < diag[!!port].num_counters; i++) 2275 stats->value[i] = hw_value[i]; 2276 2277 return diag[!!port].num_counters; 2278 } 2279 2280 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, 2281 const char ***name, 2282 u32 **offset, 2283 u32 *num, 2284 bool port) 2285 { 2286 u32 num_counters; 2287 2288 num_counters = ARRAY_SIZE(diag_basic); 2289 2290 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) 2291 num_counters += ARRAY_SIZE(diag_ext); 2292 2293 if (!port) 2294 num_counters += ARRAY_SIZE(diag_device_only); 2295 2296 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL); 2297 if (!*name) 2298 return -ENOMEM; 2299 2300 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); 2301 if (!*offset) 2302 goto err_name; 2303 2304 *num = num_counters; 2305 2306 return 0; 2307 2308 err_name: 2309 kfree(*name); 2310 return -ENOMEM; 2311 } 2312 2313 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, 2314 const char **name, 2315 u32 *offset, 2316 bool port) 2317 { 2318 int i; 2319 int j; 2320 2321 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { 2322 name[i] = diag_basic[i].name; 2323 offset[i] = diag_basic[i].offset; 2324 } 2325 2326 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { 2327 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { 2328 name[j] = diag_ext[i].name; 2329 offset[j] = diag_ext[i].offset; 2330 } 2331 } 2332 2333 if (!port) { 2334 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { 2335 name[j] = diag_device_only[i].name; 2336 offset[j] = diag_device_only[i].offset; 2337 } 
2338 } 2339 } 2340 2341 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) 2342 { 2343 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; 2344 int i; 2345 int ret; 2346 bool per_port = !!(ibdev->dev->caps.flags2 & 2347 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); 2348 2349 if (mlx4_is_slave(ibdev->dev)) 2350 return 0; 2351 2352 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { 2353 /* i == 1 means we are building port counters */ 2354 if (i && !per_port) 2355 continue; 2356 2357 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, 2358 &diag[i].offset, 2359 &diag[i].num_counters, i); 2360 if (ret) 2361 goto err_alloc; 2362 2363 mlx4_ib_fill_diag_counters(ibdev, diag[i].name, 2364 diag[i].offset, i); 2365 } 2366 2367 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats; 2368 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats; 2369 2370 return 0; 2371 2372 err_alloc: 2373 if (i) { 2374 kfree(diag[i - 1].name); 2375 kfree(diag[i - 1].offset); 2376 } 2377 2378 return ret; 2379 } 2380 2381 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) 2382 { 2383 int i; 2384 2385 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { 2386 kfree(ibdev->diag_counters[i].offset); 2387 kfree(ibdev->diag_counters[i].name); 2388 } 2389 } 2390 2391 #define MLX4_IB_INVALID_MAC ((u64)-1) 2392 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, 2393 struct net_device *dev, 2394 int port) 2395 { 2396 u64 new_smac = 0; 2397 u64 release_mac = MLX4_IB_INVALID_MAC; 2398 struct mlx4_ib_qp *qp; 2399 2400 read_lock(&dev_base_lock); 2401 new_smac = mlx4_mac_to_u64(dev->dev_addr); 2402 read_unlock(&dev_base_lock); 2403 2404 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); 2405 2406 /* no need for update QP1 and mac registration in non-SRIOV */ 2407 if (!mlx4_is_mfunc(ibdev->dev)) 2408 return; 2409 2410 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); 2411 qp = ibdev->qp1_proxy[port - 1]; 2412 if (qp) { 2413 int new_smac_index; 2414 u64 old_smac; 2415 struct mlx4_update_qp_params update_params; 2416 2417 mutex_lock(&qp->mutex); 2418 old_smac = qp->pri.smac; 2419 if (new_smac == old_smac) 2420 goto unlock; 2421 2422 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); 2423 2424 if (new_smac_index < 0) 2425 goto unlock; 2426 2427 update_params.smac_index = new_smac_index; 2428 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, 2429 &update_params)) { 2430 release_mac = new_smac; 2431 goto unlock; 2432 } 2433 /* if old port was zero, no mac was yet registered for this QP */ 2434 if (qp->pri.smac_port) 2435 release_mac = old_smac; 2436 qp->pri.smac = new_smac; 2437 qp->pri.smac_port = port; 2438 qp->pri.smac_index = new_smac_index; 2439 } 2440 2441 unlock: 2442 if (release_mac != MLX4_IB_INVALID_MAC) 2443 mlx4_unregister_mac(ibdev->dev, port, release_mac); 2444 if (qp) 2445 mutex_unlock(&qp->mutex); 2446 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); 2447 } 2448 2449 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, 2450 struct net_device *dev, 2451 unsigned long event) 2452 2453 { 2454 struct mlx4_ib_iboe *iboe; 2455 int update_qps_port = -1; 2456 int port; 2457 2458 ASSERT_RTNL(); 2459 2460 iboe = &ibdev->iboe; 2461 2462 spin_lock_bh(&iboe->lock); 2463 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 2464 2465 iboe->netdevs[port - 1] = 2466 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 2467 2468 if (dev == iboe->netdevs[port - 1] && 2469 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || 2470 event == NETDEV_UP || event == NETDEV_CHANGE)) 2471 
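/* Only record the affected port here; the actual QP1 SMAC update is done by mlx4_ib_update_qps() after iboe->lock is dropped below, since that path takes mutexes and may sleep. */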
update_qps_port = port; 2472 2473 } 2474 spin_unlock_bh(&iboe->lock); 2475 2476 if (update_qps_port > 0) 2477 mlx4_ib_update_qps(ibdev, dev, update_qps_port); 2478 } 2479 2480 static int mlx4_ib_netdev_event(struct notifier_block *this, 2481 unsigned long event, void *ptr) 2482 { 2483 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2484 struct mlx4_ib_dev *ibdev; 2485 2486 if (!net_eq(dev_net(dev), &init_net)) 2487 return NOTIFY_DONE; 2488 2489 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 2490 mlx4_ib_scan_netdevs(ibdev, dev, event); 2491 2492 return NOTIFY_DONE; 2493 } 2494 2495 static void init_pkeys(struct mlx4_ib_dev *ibdev) 2496 { 2497 int port; 2498 int slave; 2499 int i; 2500 2501 if (mlx4_is_master(ibdev->dev)) { 2502 for (slave = 0; slave <= ibdev->dev->persist->num_vfs; 2503 ++slave) { 2504 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { 2505 for (i = 0; 2506 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; 2507 ++i) { 2508 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = 2509 /* master has the identity virt2phys pkey mapping */ 2510 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : 2511 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; 2512 mlx4_sync_pkey_table(ibdev->dev, slave, port, i, 2513 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); 2514 } 2515 } 2516 } 2517 /* initialize pkey cache */ 2518 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { 2519 for (i = 0; 2520 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; 2521 ++i) 2522 ibdev->pkeys.phys_pkey_cache[port-1][i] = 2523 (i) ? 0 : 0xFFFF; 2524 } 2525 } 2526 } 2527 2528 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 2529 { 2530 int i, j, eq = 0, total_eqs = 0; 2531 2532 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, 2533 sizeof(ibdev->eq_table[0]), GFP_KERNEL); 2534 if (!ibdev->eq_table) 2535 return; 2536 2537 for (i = 1; i <= dev->caps.num_ports; i++) { 2538 for (j = 0; j < mlx4_get_eqs_per_port(dev, i); 2539 j++, total_eqs++) { 2540 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) 2541 continue; 2542 ibdev->eq_table[eq] = total_eqs; 2543 if (!mlx4_assign_eq(dev, i, 2544 &ibdev->eq_table[eq])) 2545 eq++; 2546 else 2547 ibdev->eq_table[eq] = -1; 2548 } 2549 } 2550 2551 for (i = eq; i < dev->caps.num_comp_vectors; 2552 ibdev->eq_table[i++] = -1) 2553 ; 2554 2555 /* Advertise the new number of EQs to clients */ 2556 ibdev->ib_dev.num_comp_vectors = eq; 2557 } 2558 2559 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 2560 { 2561 int i; 2562 int total_eqs = ibdev->ib_dev.num_comp_vectors; 2563 2564 /* no eqs were allocated */ 2565 if (!ibdev->eq_table) 2566 return; 2567 2568 /* Reset the advertised EQ number */ 2569 ibdev->ib_dev.num_comp_vectors = 0; 2570 2571 for (i = 0; i < total_eqs; i++) 2572 mlx4_release_eq(dev, ibdev->eq_table[i]); 2573 2574 kfree(ibdev->eq_table); 2575 ibdev->eq_table = NULL; 2576 } 2577 2578 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, 2579 struct ib_port_immutable *immutable) 2580 { 2581 struct ib_port_attr attr; 2582 struct mlx4_ib_dev *mdev = to_mdev(ibdev); 2583 int err; 2584 2585 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) { 2586 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; 2587 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2588 } else { 2589 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) 2590 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; 2591 if (mdev->dev->caps.flags2 & 
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) 2592 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | 2593 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 2594 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET; 2595 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE | 2596 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP)) 2597 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2598 } 2599 2600 err = ib_query_port(ibdev, port_num, &attr); 2601 if (err) 2602 return err; 2603 2604 immutable->pkey_tbl_len = attr.pkey_tbl_len; 2605 immutable->gid_tbl_len = attr.gid_tbl_len; 2606 2607 return 0; 2608 } 2609 2610 static void get_fw_ver_str(struct ib_device *device, char *str) 2611 { 2612 struct mlx4_ib_dev *dev = 2613 container_of(device, struct mlx4_ib_dev, ib_dev); 2614 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d", 2615 (int) (dev->dev->caps.fw_ver >> 32), 2616 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, 2617 (int) dev->dev->caps.fw_ver & 0xffff); 2618 } 2619 2620 static void *mlx4_ib_add(struct mlx4_dev *dev) 2621 { 2622 struct mlx4_ib_dev *ibdev; 2623 int num_ports = 0; 2624 int i, j; 2625 int err; 2626 struct mlx4_ib_iboe *iboe; 2627 int ib_num_ports = 0; 2628 int num_req_counters; 2629 int allocated; 2630 u32 counter_index; 2631 struct counter_index *new_counter_index = NULL; 2632 2633 pr_info_once("%s", mlx4_ib_version); 2634 2635 num_ports = 0; 2636 mlx4_foreach_ib_transport_port(i, dev) 2637 num_ports++; 2638 2639 /* No point in registering a device with no ports... */ 2640 if (num_ports == 0) 2641 return NULL; 2642 2643 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); 2644 if (!ibdev) { 2645 dev_err(&dev->persist->pdev->dev, 2646 "Device struct alloc failed\n"); 2647 return NULL; 2648 } 2649 2650 iboe = &ibdev->iboe; 2651 2652 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) 2653 goto err_dealloc; 2654 2655 if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) 2656 goto err_pd; 2657 2658 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, 2659 PAGE_SIZE); 2660 if (!ibdev->uar_map) 2661 goto err_uar; 2662 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 2663 2664 ibdev->dev = dev; 2665 ibdev->bond_next_port = 0; 2666 2667 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 2668 ibdev->ib_dev.owner = THIS_MODULE; 2669 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 2670 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; 2671 ibdev->num_ports = num_ports; 2672 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 
2673 1 : ibdev->num_ports; 2674 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 2675 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; 2676 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; 2677 ibdev->ib_dev.add_gid = mlx4_ib_add_gid; 2678 ibdev->ib_dev.del_gid = mlx4_ib_del_gid; 2679 2680 if (dev->caps.userspace_caps) 2681 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; 2682 else 2683 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION; 2684 2685 ibdev->ib_dev.uverbs_cmd_mask = 2686 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2687 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2688 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2689 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2690 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2691 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2692 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 2693 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2694 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2695 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2696 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2697 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 2698 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 2699 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 2700 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 2701 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 2702 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 2703 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 2704 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 2705 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 2706 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 2707 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 2708 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 2709 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 2710 2711 ibdev->ib_dev.query_device = mlx4_ib_query_device; 2712 ibdev->ib_dev.query_port = mlx4_ib_query_port; 2713 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; 2714 ibdev->ib_dev.query_gid = mlx4_ib_query_gid; 2715 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; 2716 ibdev->ib_dev.modify_device = mlx4_ib_modify_device; 2717 ibdev->ib_dev.modify_port = mlx4_ib_modify_port; 2718 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext; 2719 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext; 2720 ibdev->ib_dev.mmap = mlx4_ib_mmap; 2721 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd; 2722 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd; 2723 ibdev->ib_dev.create_ah = mlx4_ib_create_ah; 2724 ibdev->ib_dev.query_ah = mlx4_ib_query_ah; 2725 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; 2726 ibdev->ib_dev.create_srq = mlx4_ib_create_srq; 2727 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; 2728 ibdev->ib_dev.query_srq = mlx4_ib_query_srq; 2729 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; 2730 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; 2731 ibdev->ib_dev.create_qp = mlx4_ib_create_qp; 2732 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; 2733 ibdev->ib_dev.query_qp = mlx4_ib_query_qp; 2734 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; 2735 ibdev->ib_dev.post_send = mlx4_ib_post_send; 2736 ibdev->ib_dev.post_recv = mlx4_ib_post_recv; 2737 ibdev->ib_dev.create_cq = mlx4_ib_create_cq; 2738 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq; 2739 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq; 2740 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq; 2741 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq; 2742 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; 2743 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; 2744 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; 2745 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; 2746 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; 2747 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr; 
2748 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg; 2749 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; 2750 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; 2751 ibdev->ib_dev.process_mad = mlx4_ib_process_mad; 2752 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; 2753 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str; 2754 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext; 2755 2756 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2757 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ); 2758 2759 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) && 2760 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) == 2761 IB_LINK_LAYER_ETHERNET) || 2762 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) == 2763 IB_LINK_LAYER_ETHERNET))) { 2764 ibdev->ib_dev.create_wq = mlx4_ib_create_wq; 2765 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq; 2766 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq; 2767 ibdev->ib_dev.create_rwq_ind_table = 2768 mlx4_ib_create_rwq_ind_table; 2769 ibdev->ib_dev.destroy_rwq_ind_table = 2770 mlx4_ib_destroy_rwq_ind_table; 2771 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2772 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 2773 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 2774 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 2775 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 2776 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 2777 } 2778 2779 if (!mlx4_is_slave(ibdev->dev)) { 2780 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; 2781 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr; 2782 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; 2783 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; 2784 } 2785 2786 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2787 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { 2788 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw; 2789 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw; 2790 2791 ibdev->ib_dev.uverbs_cmd_mask |= 2792 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 2793 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 2794 } 2795 2796 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { 2797 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd; 2798 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; 2799 ibdev->ib_dev.uverbs_cmd_mask |= 2800 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 2801 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 2802 } 2803 2804 if (check_flow_steering_support(dev)) { 2805 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; 2806 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 2807 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 2808 2809 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2810 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 2811 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); 2812 } 2813 2814 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2815 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 2816 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 2817 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); 2818 2819 mlx4_ib_alloc_eqs(dev, ibdev); 2820 2821 spin_lock_init(&iboe->lock); 2822 2823 if (init_node_data(ibdev)) 2824 goto err_map; 2825 mlx4_init_sl2vl_tbl(ibdev); 2826 2827 for (i = 0; i < ibdev->num_ports; ++i) { 2828 mutex_init(&ibdev->counters_table[i].mutex); 2829 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); 2830 } 2831 2832 num_req_counters = mlx4_is_bonded(dev) ? 
1 : ibdev->num_ports; 2833 for (i = 0; i < num_req_counters; ++i) { 2834 mutex_init(&ibdev->qp1_proxy_lock[i]); 2835 allocated = 0; 2836 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2837 IB_LINK_LAYER_ETHERNET) { 2838 err = mlx4_counter_alloc(ibdev->dev, &counter_index, 2839 MLX4_RES_USAGE_DRIVER); 2840 /* if failed to allocate a new counter, use default */ 2841 if (err) 2842 counter_index = 2843 mlx4_get_default_counter_index(dev, 2844 i + 1); 2845 else 2846 allocated = 1; 2847 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */ 2848 counter_index = mlx4_get_default_counter_index(dev, 2849 i + 1); 2850 } 2851 new_counter_index = kmalloc(sizeof(*new_counter_index), 2852 GFP_KERNEL); 2853 if (!new_counter_index) { 2854 if (allocated) 2855 mlx4_counter_free(ibdev->dev, counter_index); 2856 goto err_counter; 2857 } 2858 new_counter_index->index = counter_index; 2859 new_counter_index->allocated = allocated; 2860 list_add_tail(&new_counter_index->list, 2861 &ibdev->counters_table[i].counters_list); 2862 ibdev->counters_table[i].default_counter = counter_index; 2863 pr_info("counter index %d for port %d allocated %d\n", 2864 counter_index, i + 1, allocated); 2865 } 2866 if (mlx4_is_bonded(dev)) 2867 for (i = 1; i < ibdev->num_ports ; ++i) { 2868 new_counter_index = 2869 kmalloc(sizeof(struct counter_index), 2870 GFP_KERNEL); 2871 if (!new_counter_index) 2872 goto err_counter; 2873 new_counter_index->index = counter_index; 2874 new_counter_index->allocated = 0; 2875 list_add_tail(&new_counter_index->list, 2876 &ibdev->counters_table[i].counters_list); 2877 ibdev->counters_table[i].default_counter = 2878 counter_index; 2879 } 2880 2881 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2882 ib_num_ports++; 2883 2884 spin_lock_init(&ibdev->sm_lock); 2885 mutex_init(&ibdev->cap_mask_mutex); 2886 INIT_LIST_HEAD(&ibdev->qp_list); 2887 spin_lock_init(&ibdev->reset_flow_resource_lock); 2888 2889 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && 2890 ib_num_ports) { 2891 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; 2892 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, 2893 MLX4_IB_UC_STEER_QPN_ALIGN, 2894 &ibdev->steer_qpn_base, 0, 2895 MLX4_RES_USAGE_DRIVER); 2896 if (err) 2897 goto err_counter; 2898 2899 ibdev->ib_uc_qpns_bitmap = 2900 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) * 2901 sizeof(long), 2902 GFP_KERNEL); 2903 if (!ibdev->ib_uc_qpns_bitmap) 2904 goto err_steer_qp_release; 2905 2906 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { 2907 bitmap_zero(ibdev->ib_uc_qpns_bitmap, 2908 ibdev->steer_qpn_count); 2909 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( 2910 dev, ibdev->steer_qpn_base, 2911 ibdev->steer_qpn_base + 2912 ibdev->steer_qpn_count - 1); 2913 if (err) 2914 goto err_steer_free_bitmap; 2915 } else { 2916 bitmap_fill(ibdev->ib_uc_qpns_bitmap, 2917 ibdev->steer_qpn_count); 2918 } 2919 } 2920 2921 for (j = 1; j <= ibdev->dev->caps.num_ports; j++) 2922 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); 2923 2924 if (mlx4_ib_alloc_diag_counters(ibdev)) 2925 goto err_steer_free_bitmap; 2926 2927 if (ib_register_device(&ibdev->ib_dev, NULL)) 2928 goto err_diag_counters; 2929 2930 if (mlx4_ib_mad_init(ibdev)) 2931 goto err_reg; 2932 2933 if (mlx4_ib_init_sriov(ibdev)) 2934 goto err_mad; 2935 2936 if (!iboe->nb.notifier_call) { 2937 iboe->nb.notifier_call = mlx4_ib_netdev_event; 2938 err = register_netdevice_notifier(&iboe->nb); 2939 if (err) { 2940 iboe->nb.notifier_call = NULL; 2941 goto err_notif; 2942 } 2943 } 2944 if 
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { 2945 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT); 2946 if (err) 2947 goto err_notif; 2948 } 2949 2950 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { 2951 if (device_create_file(&ibdev->ib_dev.dev, 2952 mlx4_class_attributes[j])) 2953 goto err_notif; 2954 } 2955 2956 ibdev->ib_active = true; 2957 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2958 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i), 2959 &ibdev->ib_dev); 2960 2961 if (mlx4_is_mfunc(ibdev->dev)) 2962 init_pkeys(ibdev); 2963 2964 /* create paravirt contexts for any VFs which are active */ 2965 if (mlx4_is_master(ibdev->dev)) { 2966 for (j = 0; j < MLX4_MFUNC_MAX; j++) { 2967 if (j == mlx4_master_func_num(ibdev->dev)) 2968 continue; 2969 if (mlx4_is_slave_active(ibdev->dev, j)) 2970 do_slave_init(ibdev, j, 1); 2971 } 2972 } 2973 return ibdev; 2974 2975 err_notif: 2976 if (ibdev->iboe.nb.notifier_call) { 2977 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 2978 pr_warn("failure unregistering notifier\n"); 2979 ibdev->iboe.nb.notifier_call = NULL; 2980 } 2981 flush_workqueue(wq); 2982 2983 mlx4_ib_close_sriov(ibdev); 2984 2985 err_mad: 2986 mlx4_ib_mad_cleanup(ibdev); 2987 2988 err_reg: 2989 ib_unregister_device(&ibdev->ib_dev); 2990 2991 err_diag_counters: 2992 mlx4_ib_diag_cleanup(ibdev); 2993 2994 err_steer_free_bitmap: 2995 kfree(ibdev->ib_uc_qpns_bitmap); 2996 2997 err_steer_qp_release: 2998 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) 2999 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 3000 ibdev->steer_qpn_count); 3001 err_counter: 3002 for (i = 0; i < ibdev->num_ports; ++i) 3003 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); 3004 3005 err_map: 3006 mlx4_ib_free_eqs(dev, ibdev); 3007 iounmap(ibdev->uar_map); 3008 3009 err_uar: 3010 mlx4_uar_free(dev, &ibdev->priv_uar); 3011 3012 err_pd: 3013 mlx4_pd_free(dev, ibdev->priv_pdn); 3014 3015 err_dealloc: 3016 ib_dealloc_device(&ibdev->ib_dev); 3017 3018 return NULL; 3019 } 3020 3021 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) 3022 { 3023 int offset; 3024 3025 WARN_ON(!dev->ib_uc_qpns_bitmap); 3026 3027 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap, 3028 dev->steer_qpn_count, 3029 get_count_order(count)); 3030 if (offset < 0) 3031 return offset; 3032 3033 *qpn = dev->steer_qpn_base + offset; 3034 return 0; 3035 } 3036 3037 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) 3038 { 3039 if (!qpn || 3040 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) 3041 return; 3042 3043 BUG_ON(qpn < dev->steer_qpn_base); 3044 3045 bitmap_release_region(dev->ib_uc_qpns_bitmap, 3046 qpn - dev->steer_qpn_base, 3047 get_count_order(count)); 3048 } 3049 3050 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 3051 int is_attach) 3052 { 3053 int err; 3054 size_t flow_size; 3055 struct ib_flow_attr *flow = NULL; 3056 struct ib_flow_spec_ib *ib_spec; 3057 3058 if (is_attach) { 3059 flow_size = sizeof(struct ib_flow_attr) + 3060 sizeof(struct ib_flow_spec_ib); 3061 flow = kzalloc(flow_size, GFP_KERNEL); 3062 if (!flow) 3063 return -ENOMEM; 3064 flow->port = mqp->port; 3065 flow->num_of_specs = 1; 3066 flow->size = flow_size; 3067 ib_spec = (struct ib_flow_spec_ib *)(flow + 1); 3068 ib_spec->type = IB_FLOW_SPEC_IB; 3069 ib_spec->size = sizeof(struct ib_flow_spec_ib); 3070 /* Add an empty rule for IB L2 */ 3071 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); 3072 3073 err = 
__mlx4_ib_create_flow(&mqp->ibqp, flow, 3074 IB_FLOW_DOMAIN_NIC, 3075 MLX4_FS_REGULAR, 3076 &mqp->reg_id); 3077 } else { 3078 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); 3079 } 3080 kfree(flow); 3081 return err; 3082 } 3083 3084 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) 3085 { 3086 struct mlx4_ib_dev *ibdev = ibdev_ptr; 3087 int p; 3088 int i; 3089 3090 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 3091 devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); 3092 ibdev->ib_active = false; 3093 flush_workqueue(wq); 3094 3095 mlx4_ib_close_sriov(ibdev); 3096 mlx4_ib_mad_cleanup(ibdev); 3097 ib_unregister_device(&ibdev->ib_dev); 3098 mlx4_ib_diag_cleanup(ibdev); 3099 if (ibdev->iboe.nb.notifier_call) { 3100 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 3101 pr_warn("failure unregistering notifier\n"); 3102 ibdev->iboe.nb.notifier_call = NULL; 3103 } 3104 3105 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { 3106 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 3107 ibdev->steer_qpn_count); 3108 kfree(ibdev->ib_uc_qpns_bitmap); 3109 } 3110 3111 iounmap(ibdev->uar_map); 3112 for (p = 0; p < ibdev->num_ports; ++p) 3113 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); 3114 3115 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) 3116 mlx4_CLOSE_PORT(dev, p); 3117 3118 mlx4_ib_free_eqs(dev, ibdev); 3119 3120 mlx4_uar_free(dev, &ibdev->priv_uar); 3121 mlx4_pd_free(dev, ibdev->priv_pdn); 3122 ib_dealloc_device(&ibdev->ib_dev); 3123 } 3124 3125 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) 3126 { 3127 struct mlx4_ib_demux_work **dm = NULL; 3128 struct mlx4_dev *dev = ibdev->dev; 3129 int i; 3130 unsigned long flags; 3131 struct mlx4_active_ports actv_ports; 3132 unsigned int ports; 3133 unsigned int first_port; 3134 3135 if (!mlx4_is_master(dev)) 3136 return; 3137 3138 actv_ports = mlx4_get_active_ports(dev, slave); 3139 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); 3140 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); 3141 3142 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); 3143 if (!dm) 3144 return; 3145 3146 for (i = 0; i < ports; i++) { 3147 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); 3148 if (!dm[i]) { 3149 while (--i >= 0) 3150 kfree(dm[i]); 3151 goto out; 3152 } 3153 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); 3154 dm[i]->port = first_port + i + 1; 3155 dm[i]->slave = slave; 3156 dm[i]->do_init = do_init; 3157 dm[i]->dev = ibdev; 3158 } 3159 /* initialize or tear down tunnel QPs for the slave */ 3160 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); 3161 if (!ibdev->sriov.is_going_down) { 3162 for (i = 0; i < ports; i++) 3163 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); 3164 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 3165 } else { 3166 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 3167 for (i = 0; i < ports; i++) 3168 kfree(dm[i]); 3169 } 3170 out: 3171 kfree(dm); 3172 return; 3173 } 3174 3175 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) 3176 { 3177 struct mlx4_ib_qp *mqp; 3178 unsigned long flags_qp; 3179 unsigned long flags_cq; 3180 struct mlx4_ib_cq *send_mcq, *recv_mcq; 3181 struct list_head cq_notify_list; 3182 struct mlx4_cq *mcq; 3183 unsigned long flags; 3184 3185 pr_warn("mlx4_ib_handle_catas_error was started\n"); 3186 INIT_LIST_HEAD(&cq_notify_list); 3187 3188 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 3189 
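/* Under reset_flow_resource_lock: for every QP that still has posted work requests, collect its send/receive CQ on cq_notify_list (once per CQ), then invoke the completion handlers after the scan so consumers are notified of the fatal error. */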
spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 3190 3191 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 3192 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 3193 if (mqp->sq.tail != mqp->sq.head) { 3194 send_mcq = to_mcq(mqp->ibqp.send_cq); 3195 spin_lock_irqsave(&send_mcq->lock, flags_cq); 3196 if (send_mcq->mcq.comp && 3197 mqp->ibqp.send_cq->comp_handler) { 3198 if (!send_mcq->mcq.reset_notify_added) { 3199 send_mcq->mcq.reset_notify_added = 1; 3200 list_add_tail(&send_mcq->mcq.reset_notify, 3201 &cq_notify_list); 3202 } 3203 } 3204 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 3205 } 3206 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 3207 /* Now, handle the QP's receive queue */ 3208 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 3209 /* no handling is needed for SRQ */ 3210 if (!mqp->ibqp.srq) { 3211 if (mqp->rq.tail != mqp->rq.head) { 3212 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 3213 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 3214 if (recv_mcq->mcq.comp && 3215 mqp->ibqp.recv_cq->comp_handler) { 3216 if (!recv_mcq->mcq.reset_notify_added) { 3217 recv_mcq->mcq.reset_notify_added = 1; 3218 list_add_tail(&recv_mcq->mcq.reset_notify, 3219 &cq_notify_list); 3220 } 3221 } 3222 spin_unlock_irqrestore(&recv_mcq->lock, 3223 flags_cq); 3224 } 3225 } 3226 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 3227 } 3228 3229 list_for_each_entry(mcq, &cq_notify_list, reset_notify) { 3230 mcq->comp(mcq); 3231 } 3232 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 3233 pr_warn("mlx4_ib_handle_catas_error ended\n"); 3234 } 3235 3236 static void handle_bonded_port_state_event(struct work_struct *work) 3237 { 3238 struct ib_event_work *ew = 3239 container_of(work, struct ib_event_work, work); 3240 struct mlx4_ib_dev *ibdev = ew->ib_dev; 3241 enum ib_port_state bonded_port_state = IB_PORT_NOP; 3242 int i; 3243 struct ib_event ibev; 3244 3245 kfree(ew); 3246 spin_lock_bh(&ibdev->iboe.lock); 3247 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 3248 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 3249 enum ib_port_state curr_port_state; 3250 3251 if (!curr_netdev) 3252 continue; 3253 3254 curr_port_state = 3255 (netif_running(curr_netdev) && 3256 netif_carrier_ok(curr_netdev)) ? 3257 IB_PORT_ACTIVE : IB_PORT_DOWN; 3258 3259 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ? 3260 curr_port_state : IB_PORT_ACTIVE; 3261 } 3262 spin_unlock_bh(&ibdev->iboe.lock); 3263 3264 ibev.device = &ibdev->ib_dev; 3265 ibev.element.port_num = 1; 3266 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? 3267 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 3268 3269 ib_dispatch_event(&ibev); 3270 } 3271 3272 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port) 3273 { 3274 u64 sl2vl; 3275 int err; 3276 3277 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl); 3278 if (err) { 3279 pr_err("Unable to get current sl to vl mapping for port %d. 
Using all zeroes (%d)\n", 3280 port, err); 3281 sl2vl = 0; 3282 } 3283 atomic64_set(&mdev->sl2vl[port - 1], sl2vl); 3284 } 3285 3286 static void ib_sl2vl_update_work(struct work_struct *work) 3287 { 3288 struct ib_event_work *ew = container_of(work, struct ib_event_work, work); 3289 struct mlx4_ib_dev *mdev = ew->ib_dev; 3290 int port = ew->port; 3291 3292 mlx4_ib_sl2vl_update(mdev, port); 3293 3294 kfree(ew); 3295 } 3296 3297 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, 3298 int port) 3299 { 3300 struct ib_event_work *ew; 3301 3302 ew = kmalloc(sizeof(*ew), GFP_ATOMIC); 3303 if (ew) { 3304 INIT_WORK(&ew->work, ib_sl2vl_update_work); 3305 ew->port = port; 3306 ew->ib_dev = ibdev; 3307 queue_work(wq, &ew->work); 3308 } 3309 } 3310 3311 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 3312 enum mlx4_dev_event event, unsigned long param) 3313 { 3314 struct ib_event ibev; 3315 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); 3316 struct mlx4_eqe *eqe = NULL; 3317 struct ib_event_work *ew; 3318 int p = 0; 3319 3320 if (mlx4_is_bonded(dev) && 3321 ((event == MLX4_DEV_EVENT_PORT_UP) || 3322 (event == MLX4_DEV_EVENT_PORT_DOWN))) { 3323 ew = kmalloc(sizeof(*ew), GFP_ATOMIC); 3324 if (!ew) 3325 return; 3326 INIT_WORK(&ew->work, handle_bonded_port_state_event); 3327 ew->ib_dev = ibdev; 3328 queue_work(wq, &ew->work); 3329 return; 3330 } 3331 3332 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) 3333 eqe = (struct mlx4_eqe *)param; 3334 else 3335 p = (int) param; 3336 3337 switch (event) { 3338 case MLX4_DEV_EVENT_PORT_UP: 3339 if (p > ibdev->num_ports) 3340 return; 3341 if (!mlx4_is_slave(dev) && 3342 rdma_port_get_link_layer(&ibdev->ib_dev, p) == 3343 IB_LINK_LAYER_INFINIBAND) { 3344 if (mlx4_is_master(dev)) 3345 mlx4_ib_invalidate_all_guid_record(ibdev, p); 3346 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST && 3347 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) 3348 mlx4_sched_ib_sl2vl_update_work(ibdev, p); 3349 } 3350 ibev.event = IB_EVENT_PORT_ACTIVE; 3351 break; 3352 3353 case MLX4_DEV_EVENT_PORT_DOWN: 3354 if (p > ibdev->num_ports) 3355 return; 3356 ibev.event = IB_EVENT_PORT_ERR; 3357 break; 3358 3359 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: 3360 ibdev->ib_active = false; 3361 ibev.event = IB_EVENT_DEVICE_FATAL; 3362 mlx4_ib_handle_catas_error(ibdev); 3363 break; 3364 3365 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: 3366 ew = kmalloc(sizeof *ew, GFP_ATOMIC); 3367 if (!ew) 3368 break; 3369 3370 INIT_WORK(&ew->work, handle_port_mgmt_change_event); 3371 memcpy(&ew->ib_eqe, eqe, sizeof *eqe); 3372 ew->ib_dev = ibdev; 3373 /* need to queue only for port owner, which uses GEN_EQE */ 3374 if (mlx4_is_master(dev)) 3375 queue_work(wq, &ew->work); 3376 else 3377 handle_port_mgmt_change_event(&ew->work); 3378 return; 3379 3380 case MLX4_DEV_EVENT_SLAVE_INIT: 3381 /* here, p is the slave id */ 3382 do_slave_init(ibdev, p, 1); 3383 if (mlx4_is_master(dev)) { 3384 int i; 3385 3386 for (i = 1; i <= ibdev->num_ports; i++) { 3387 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) 3388 == IB_LINK_LAYER_INFINIBAND) 3389 mlx4_ib_slave_alias_guid_event(ibdev, 3390 p, i, 3391 1); 3392 } 3393 } 3394 return; 3395 3396 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: 3397 if (mlx4_is_master(dev)) { 3398 int i; 3399 3400 for (i = 1; i <= ibdev->num_ports; i++) { 3401 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) 3402 == IB_LINK_LAYER_INFINIBAND) 3403 mlx4_ib_slave_alias_guid_event(ibdev, 3404 p, i, 3405 0); 3406 } 3407 } 3408 /* here, p is the slave id */ 3409 
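/* do_init == 0: tear down the tunnel QPs that were set up for this slave */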
do_slave_init(ibdev, p, 0); 3410 return; 3411 3412 default: 3413 return; 3414 } 3415 3416 ibev.device = ibdev_ptr; 3417 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; 3418 3419 ib_dispatch_event(&ibev); 3420 } 3421 3422 static struct mlx4_interface mlx4_ib_interface = { 3423 .add = mlx4_ib_add, 3424 .remove = mlx4_ib_remove, 3425 .event = mlx4_ib_event, 3426 .protocol = MLX4_PROT_IB_IPV6, 3427 .flags = MLX4_INTFF_BONDING 3428 }; 3429 3430 static int __init mlx4_ib_init(void) 3431 { 3432 int err; 3433 3434 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM); 3435 if (!wq) 3436 return -ENOMEM; 3437 3438 err = mlx4_ib_mcg_init(); 3439 if (err) 3440 goto clean_wq; 3441 3442 err = mlx4_register_interface(&mlx4_ib_interface); 3443 if (err) 3444 goto clean_mcg; 3445 3446 return 0; 3447 3448 clean_mcg: 3449 mlx4_ib_mcg_destroy(); 3450 3451 clean_wq: 3452 destroy_workqueue(wq); 3453 return err; 3454 } 3455 3456 static void __exit mlx4_ib_cleanup(void) 3457 { 3458 mlx4_unregister_interface(&mlx4_ib_interface); 3459 mlx4_ib_mcg_destroy(); 3460 destroy_workqueue(wq); 3461 } 3462 3463 module_init(mlx4_ib_init); 3464 module_exit(mlx4_ib_cleanup); 3465