/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct work;
	union ib_gid gids[128];
	struct mlx4_ib_dev *dev;
	int port;
};

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;

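/*
 * Query overall device attributes.  Device limits are taken from the
 * capabilities reported by the mlx4 core driver; vendor, part and
 * system image information comes from the NodeInfo attribute fetched
 * through a MAD_IFC firmware command.
 */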
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr = dev->dev->caps.max_wqes;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

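/*
 * Report the link layer of a port: ports set in the core driver's
 * port_mask are native InfiniBand, the rest run IBoE over Ethernet.
 */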
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask & (1 << (port_num - 1)) ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props,
			      struct ib_smp *out_mad)
{
	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	return 0;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;

	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->max_mtu = IB_MTU_2048;
	props->subnet_timeout = 0;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = 0;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);

out:
	spin_unlock(&iboe->lock);
	return 0;
}

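/*
 * Query port attributes.  The PortInfo attribute is always fetched
 * through MAD_IFC; the result is then interpreted either as native IB
 * port info or used to synthesize attributes for an IBoE (Ethernet)
 * port, depending on the port's link layer.
 */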
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, out_mad) :
		eth_link_query_port(ibdev, port, props, out_mad);

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size = dev->dev->caps.num_qps;
	resp.bf_reg_size = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
	if (err)
		return err;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

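/*
 * Detach a QP from a multicast group.  For IBoE, also drop the
 * multicast MAC address that was added to the netdev when the group
 * was joined, and free the tracking entry from the QP's gid list.
 */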
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;

	err = mlx4_multicast_detach(mdev->dev,
				    &mqp->mqp, gid->raw);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		printk(KERN_WARNING "could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}

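/*
 * Deferred work: push an updated GID table for one port to the
 * firmware with SET_PORT and, on success, mirror it into the software
 * gid_table and dispatch an IB_EVENT_LID_CHANGE event for that port.
 */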
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err)
		printk(KERN_WARNING "set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}

static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}

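/*
 * Netdevice notifier: track the Ethernet net_device that backs each
 * IBoE port and refresh that port's GID table whenever the interface
 * comes up, goes down, or changes its hardware address.
 */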
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}

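/*
 * Bring up the IB side of a ConnectX device: allocate driver-private
 * resources, fill in the ib_device verbs entry points, register with
 * the IB core, start MAD handling, and (for devices with Ethernet
 * ports) hook the netdevice notifier used to maintain the IBoE GID
 * tables.
 */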
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;
	int err;
	struct mlx4_ib_iboe *iboe;

	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_map;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		printk(KERN_WARNING "failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

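/*
 * Tear down the IB device: stop MAD handling, unregister from the IB
 * core and the netdevice notifier, unmap the UAR, close the IB ports,
 * and release the driver-private UAR and PD.
 */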
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			printk(KERN_WARNING "failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROTOCOL_IB
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);