Lines Matching +full:slave +full:- +full:dev
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
63 #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.bl…
64 #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_e…
91 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num);
92 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num);
93 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
104 return cpu_to_be64(atomic_inc_return(&ctx->tid)) | in mlx4_ib_get_new_demux_tid()
108 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, in mlx4_MAD_IFC() argument
119 inmailbox = mlx4_alloc_cmd_mailbox(dev->dev); in mlx4_MAD_IFC()
122 inbox = inmailbox->buf; in mlx4_MAD_IFC()
124 outmailbox = mlx4_alloc_cmd_mailbox(dev->dev); in mlx4_MAD_IFC()
126 mlx4_free_cmd_mailbox(dev->dev, inmailbox); in mlx4_MAD_IFC()
140 if (mlx4_is_mfunc(dev->dev) && in mlx4_MAD_IFC()
160 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num); in mlx4_MAD_IFC()
161 ext_info->rqpn = cpu_to_be32(in_wc->src_qp); in mlx4_MAD_IFC()
162 ext_info->sl = in_wc->sl << 4; in mlx4_MAD_IFC()
163 ext_info->g_path = in_wc->dlid_path_bits | in mlx4_MAD_IFC()
164 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); in mlx4_MAD_IFC()
165 ext_info->pkey = cpu_to_be16(in_wc->pkey_index); in mlx4_MAD_IFC()
168 memcpy(ext_info->grh, in_grh, 40); in mlx4_MAD_IFC()
172 in_modifier |= ib_lid_cpu16(in_wc->slid) << 16; in mlx4_MAD_IFC()
175 err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier, in mlx4_MAD_IFC()
176 mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier, in mlx4_MAD_IFC()
181 memcpy(response_mad, outmailbox->buf, 256); in mlx4_MAD_IFC()
183 mlx4_free_cmd_mailbox(dev->dev, inmailbox); in mlx4_MAD_IFC()
184 mlx4_free_cmd_mailbox(dev->dev, outmailbox); in mlx4_MAD_IFC()
189 static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl) in update_sm_ah() argument
195 if (!dev->send_agent[port_num - 1][0]) in update_sm_ah()
199 ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num); in update_sm_ah()
204 new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, in update_sm_ah()
209 spin_lock_irqsave(&dev->sm_lock, flags); in update_sm_ah()
210 if (dev->sm_ah[port_num - 1]) in update_sm_ah()
211 rdma_destroy_ah(dev->sm_ah[port_num - 1], 0); in update_sm_ah()
212 dev->sm_ah[port_num - 1] = new_ah; in update_sm_ah()
213 spin_unlock_irqrestore(&dev->sm_lock, flags); in update_sm_ah()
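The update_sm_ah() fragment above follows a build-outside, swap-under-lock pattern: the replacement address handle is created first, and only the pointer exchange (plus destruction of any stale handle) happens under sm_lock. A minimal userspace analogue of that pattern, with invented names and a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sm_ah { unsigned short lid; unsigned char sl; };

    static struct sm_ah *cached_ah;
    static pthread_mutex_t ah_lock = PTHREAD_MUTEX_INITIALIZER;

    static void update_cached_ah(unsigned short lid, unsigned char sl)
    {
            struct sm_ah *new_ah = malloc(sizeof(*new_ah));

            if (!new_ah)
                    return;
            new_ah->lid = lid;
            new_ah->sl = sl;

            pthread_mutex_lock(&ah_lock);
            free(cached_ah);            /* drop the stale handle, if any */
            cached_ah = new_ah;         /* publish the fresh one */
            pthread_mutex_unlock(&ah_lock);
    }

    int main(void)
    {
            update_cached_ah(0x10, 0);
            printf("cached lid 0x%x sl %u\n", cached_ah->lid, cached_ah->sl);
            free(cached_ah);
            return 0;
    }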
218 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
230 struct mlx4_ib_dev *dev = to_mdev(ibdev); in smp_snoop() local
231 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in smp_snoop()
232 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in smp_snoop()
233 mad->mad_hdr.method == IB_MGMT_METHOD_SET) in smp_snoop()
234 switch (mad->mad_hdr.attr_id) { in smp_snoop()
236 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) in smp_snoop()
238 pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; in smp_snoop()
239 lid = be16_to_cpu(pinfo->lid); in smp_snoop()
241 update_sm_ah(dev, port_num, in smp_snoop()
242 be16_to_cpu(pinfo->sm_lid), in smp_snoop()
243 pinfo->neighbormtu_mastersmsl & 0xf); in smp_snoop()
245 if (pinfo->clientrereg_resv_subnetto & 0x80) in smp_snoop()
246 handle_client_rereg_event(dev, port_num); in smp_snoop()
249 handle_lid_change_event(dev, port_num); in smp_snoop()
253 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) in smp_snoop()
255 if (!mlx4_is_mfunc(dev->dev)) { in smp_snoop()
256 mlx4_ib_dispatch_event(dev, port_num, in smp_snoop()
264 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF; in smp_snoop()
265 base = (__be16 *) &(((struct ib_smp *)mad)->data[0]); in smp_snoop()
271 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) { in smp_snoop()
273 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] = in smp_snoop()
282 mlx4_ib_dispatch_event(dev, port_num, in smp_snoop()
284 if (!dev->sriov.is_going_down) in smp_snoop()
285 __propagate_pkey_ev(dev, port_num, bn, in smp_snoop()
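The block above appears to snoop a P_KeyTable SET: the low 16 bits of the attribute modifier select a 32-entry block, which is diffed against the cached physical table so that a PKEY_CHANGE event is dispatched (and propagated to active slaves) only when an entry really changed. A standalone sketch of the diff-and-update step, with made-up sizes and names:

    #include <stdint.h>
    #include <stdio.h>

    #define PKEYS_PER_BLK 32

    static uint16_t pkey_cache[4 * PKEYS_PER_BLK];    /* cached physical table */

    static int snoop_pkey_block(uint32_t attr_mod, const uint16_t *blk)
    {
            unsigned int bn = attr_mod & 0xFFFF;      /* block number */
            unsigned int i;
            int changed = 0;

            for (i = 0; i < PKEYS_PER_BLK; i++) {
                    if (pkey_cache[bn * PKEYS_PER_BLK + i] != blk[i]) {
                            pkey_cache[bn * PKEYS_PER_BLK + i] = blk[i];
                            changed = 1;
                    }
            }
            return changed;    /* caller would dispatch IB_EVENT_PKEY_CHANGE */
    }

    int main(void)
    {
            uint16_t blk[PKEYS_PER_BLK] = { 0xffff };

            printf("changed=%d\n", snoop_pkey_block(0, blk));    /* 1 */
            printf("changed=%d\n", snoop_pkey_block(0, blk));    /* 0 */
            return 0;
    }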
291 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) in smp_snoop()
293 /* paravirtualized master's guid is guid 0 -- does not change */ in smp_snoop()
294 if (!mlx4_is_master(dev->dev)) in smp_snoop()
295 mlx4_ib_dispatch_event(dev, port_num, in smp_snoop()
298 if (mlx4_is_master(dev->dev) && in smp_snoop()
299 !dev->sriov.is_going_down) { in smp_snoop()
300 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod); in smp_snoop()
301 mlx4_ib_update_cache_on_guid_change(dev, bn, port_num, in smp_snoop()
302 (u8 *)(&((struct ib_smp *)mad)->data)); in smp_snoop()
303 mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num, in smp_snoop()
304 (u8 *)(&((struct ib_smp *)mad)->data)); in smp_snoop()
312 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV && in smp_snoop()
313 dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT) in smp_snoop()
315 if (!mlx4_is_slave(dev->dev)) { in smp_snoop()
320 sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj]; in smp_snoop()
324 atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64); in smp_snoop()
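The sl2vl fragment above caches the snooped SL-to-VL mapping by packing its eight bytes into a single 64-bit value through a union, so the whole table can be published with one atomic store. A minimal sketch of the packing (the atomic store itself is elided):

    #include <stdint.h>
    #include <stdio.h>

    union sl2vl_tbl {
            uint8_t  sl8[8];    /* per-byte view, as filled from the SMP data */
            uint64_t sl64;      /* packed view, stored atomically in the driver */
    };

    int main(void)
    {
            union sl2vl_tbl t = { .sl8 = { 0, 1, 2, 3, 4, 5, 6, 7 } };

            printf("packed sl2vl table: 0x%016llx\n", (unsigned long long)t.sl64);
            return 0;
    }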
333 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, in __propagate_pkey_ev() argument
336 int i, ix, slave, err; in __propagate_pkey_ev() local
339 for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) { in __propagate_pkey_ev()
340 if (slave == mlx4_master_func_num(dev->dev)) in __propagate_pkey_ev()
342 if (!mlx4_is_slave_active(dev->dev, slave)) in __propagate_pkey_ev()
350 ix < dev->dev->caps.pkey_table_len[port_num]; ix++) { in __propagate_pkey_ev()
351 if (dev->pkeys.virt2phys_pkey[slave][port_num - 1] in __propagate_pkey_ev()
353 err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num); in __propagate_pkey_ev()
354 pr_debug("propagate_pkey_ev: slave %d," in __propagate_pkey_ev()
356 slave, port_num, ix, err); in __propagate_pkey_ev()
367 static void node_desc_override(struct ib_device *dev, in node_desc_override() argument
372 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in node_desc_override()
373 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in node_desc_override()
374 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && in node_desc_override()
375 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { in node_desc_override()
376 spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); in node_desc_override()
377 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, in node_desc_override()
379 spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); in node_desc_override()
383 static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num, in forward_trap() argument
386 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap()
388 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
404 spin_lock_irqsave(&dev->sm_lock, flags); in forward_trap()
405 memcpy(send_buf->mad, mad, sizeof *mad); in forward_trap()
406 if ((send_buf->ah = dev->sm_ah[port_num - 1])) in forward_trap()
409 ret = -EINVAL; in forward_trap()
410 spin_unlock_irqrestore(&dev->sm_lock, flags); in forward_trap()
417 static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave, in mlx4_ib_demux_sa_handler() argument
423 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) { in mlx4_ib_demux_sa_handler()
425 ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad); in mlx4_ib_demux_sa_handler()
435 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_find_real_gid() local
438 for (i = 0; i < dev->dev->caps.sqp_demux; i++) { in mlx4_ib_find_real_gid()
439 if (dev->sriov.demux[port - 1].guid_cache[i] == guid) in mlx4_ib_find_real_gid()
442 return -1; in mlx4_ib_find_real_gid()
446 static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave, in find_slave_port_pkey_ix() argument
453 if (slave == mlx4_master_func_num(dev->dev)) in find_slave_port_pkey_ix()
454 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix); in find_slave_port_pkey_ix()
456 unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1; in find_slave_port_pkey_ix()
458 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) { in find_slave_port_pkey_ix()
459 if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix) in find_slave_port_pkey_ix()
462 pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i]; in find_slave_port_pkey_ix()
464 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey); in find_slave_port_pkey_ix()
484 return -EINVAL; in find_slave_port_pkey_ix()
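find_slave_port_pkey_ix() above walks a slave's virtual P_Key table, whose entries are indices into the physical table, skipping the "unassigned" sentinel (the last physical index). A hedged standalone sketch of that kind of lookup, with invented table contents:

    #include <stdint.h>
    #include <stdio.h>

    #define VIRT_LEN   8
    #define PHYS_LEN   16
    #define UNASSIGNED (PHYS_LEN - 1)    /* sentinel: slot not mapped for this slave */

    static uint16_t phys_pkeys[PHYS_LEN] = { 0xffff, 0x8001, 0x7fff };
    static uint8_t  virt2phys[VIRT_LEN]  = { 0, 2, 1, UNASSIGNED, UNASSIGNED,
                                             UNASSIGNED, UNASSIGNED, UNASSIGNED };

    static int find_phys_index(uint16_t pkey)
    {
            unsigned int i;

            for (i = 0; i < VIRT_LEN; i++) {
                    if (virt2phys[i] == UNASSIGNED)
                            continue;
                    if (phys_pkeys[virt2phys[i]] == pkey)
                            return virt2phys[i];    /* physical index holding the pkey */
            }
            return -1;
    }

    int main(void)
    {
            printf("physical index of pkey 0x7fff: %d\n", find_phys_index(0x7fff));
            return 0;
    }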
498 return -EINVAL; in get_gids_from_l3_hdr()
504 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) in is_proxy_qp0() argument
506 int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave; in is_proxy_qp0()
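is_proxy_qp0() above, together with the dqpn computation at line 569 and the reverse mapping at line 1501, implies a fixed layout for the special proxy QPs: each function owns a block of eight QPs starting at base_proxy_sqpn + 8 * slave, and the offset inside the block is derived from the QP type and port. A small sketch of the forward and reverse mapping, using an arbitrary base value:

    #include <stdio.h>

    #define BASE_PROXY_SQPN 0x1000    /* arbitrary for the example */

    static int proxy_qpn(int slave, int port, int dest_qpt)
    {
            /* mirrors: base + 8 * slave + port + (dest_qpt * 2) - 1 */
            return BASE_PROXY_SQPN + 8 * slave + port + (dest_qpt * 2) - 1;
    }

    static int slave_from_qpn(int qpn)
    {
            /* mirrors: ((qpn & ~0x7) - base) / 8 */
            return ((qpn & ~0x7) - BASE_PROXY_SQPN) / 8;
    }

    int main(void)
    {
            int qpn = proxy_qpn(3, 1, 0 /* QP-type code, e.g. SMI */);

            printf("qpn 0x%x belongs to slave %d\n", qpn, slave_from_qpn(qpn));
            return 0;
    }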
511 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port, in mlx4_ib_send_to_slave() argument
529 u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; in mlx4_ib_send_to_slave()
533 return -EINVAL; in mlx4_ib_send_to_slave()
536 tun_ctx = dev->sriov.demux[port-1].tun[slave]; in mlx4_ib_send_to_slave()
539 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE) in mlx4_ib_send_to_slave()
540 return -EAGAIN; in mlx4_ib_send_to_slave()
543 tun_qp = &tun_ctx->qp[0]; in mlx4_ib_send_to_slave()
545 tun_qp = &tun_ctx->qp[1]; in mlx4_ib_send_to_slave()
547 /* compute P_Key index to put in tunnel header for slave */ in mlx4_ib_send_to_slave()
550 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey); in mlx4_ib_send_to_slave()
553 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", in mlx4_ib_send_to_slave()
554 wc->pkey_index, ret); in mlx4_ib_send_to_slave()
555 return -EINVAL; in mlx4_ib_send_to_slave()
558 ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix); in mlx4_ib_send_to_slave()
561 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", in mlx4_ib_send_to_slave()
563 return -EINVAL; in mlx4_ib_send_to_slave()
567 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; in mlx4_ib_send_to_slave()
569 dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1; in mlx4_ib_send_to_slave()
571 /* get tunnel tx data buf for slave */ in mlx4_ib_send_to_slave()
572 src_qp = tun_qp->qp; in mlx4_ib_send_to_slave()
577 attr.type = rdma_ah_find_type(&dev->ib_dev, port); in mlx4_ib_send_to_slave()
585 return -EINVAL; in mlx4_ib_send_to_slave()
588 ah = rdma_create_ah(tun_ctx->pd, &attr, 0); in mlx4_ib_send_to_slave()
590 return -ENOMEM; in mlx4_ib_send_to_slave()
593 spin_lock(&tun_qp->tx_lock); in mlx4_ib_send_to_slave()
594 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >= in mlx4_ib_send_to_slave()
595 (MLX4_NUM_TUNNEL_BUFS - 1)) in mlx4_ib_send_to_slave()
596 ret = -EAGAIN; in mlx4_ib_send_to_slave()
598 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); in mlx4_ib_send_to_slave()
599 spin_unlock(&tun_qp->tx_lock); in mlx4_ib_send_to_slave()
603 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); in mlx4_ib_send_to_slave()
604 if (tun_qp->tx_ring[tun_tx_ix].ah) in mlx4_ib_send_to_slave()
605 rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0); in mlx4_ib_send_to_slave()
606 tun_qp->tx_ring[tun_tx_ix].ah = ah; in mlx4_ib_send_to_slave()
607 ib_dma_sync_single_for_cpu(&dev->ib_dev, in mlx4_ib_send_to_slave()
608 tun_qp->tx_ring[tun_tx_ix].buf.map, in mlx4_ib_send_to_slave()
614 memcpy(&tun_mad->grh, grh, sizeof *grh); in mlx4_ib_send_to_slave()
615 memcpy(&tun_mad->mad, mad, sizeof *mad); in mlx4_ib_send_to_slave()
618 tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix); in mlx4_ib_send_to_slave()
619 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF); in mlx4_ib_send_to_slave()
620 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0; in mlx4_ib_send_to_slave()
624 if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan, in mlx4_ib_send_to_slave()
627 if (vlan != wc->vlan_id) in mlx4_ib_send_to_slave()
628 /* Packet vlan is not the VST-assigned vlan. in mlx4_ib_send_to_slave()
638 vlan = wc->vlan_id; in mlx4_ib_send_to_slave()
641 tun_mad->hdr.sl_vid = cpu_to_be16(vlan); in mlx4_ib_send_to_slave()
642 memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4); in mlx4_ib_send_to_slave()
643 memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2); in mlx4_ib_send_to_slave()
645 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12); in mlx4_ib_send_to_slave()
646 tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid); in mlx4_ib_send_to_slave()
649 ib_dma_sync_single_for_device(&dev->ib_dev, in mlx4_ib_send_to_slave()
650 tun_qp->tx_ring[tun_tx_ix].buf.map, in mlx4_ib_send_to_slave()
654 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map; in mlx4_ib_send_to_slave()
656 list.lkey = tun_ctx->pd->local_dma_lkey; in mlx4_ib_send_to_slave()
673 spin_lock(&tun_qp->tx_lock); in mlx4_ib_send_to_slave()
674 tun_qp->tx_ix_tail++; in mlx4_ib_send_to_slave()
675 spin_unlock(&tun_qp->tx_lock); in mlx4_ib_send_to_slave()
676 tun_qp->tx_ring[tun_tx_ix].ah = NULL; in mlx4_ib_send_to_slave()
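The tx bookkeeping visible in mlx4_ib_send_to_slave() above (and again in mlx4_ib_send_to_wire() further down) is a classic power-of-two ring: head and tail are free-running counters, the ring counts as full once head - tail reaches size - 1, a slot is claimed by masking the incremented head, and the tail advances when a send completes or the post fails. A self-contained sketch of that bookkeeping with invented names:

    #include <stdio.h>

    #define RING_SIZE 16    /* must be a power of two */

    struct tx_ring {
            unsigned int head;
            unsigned int tail;
    };

    static int ring_reserve(struct tx_ring *r, unsigned int *slot)
    {
            if (r->head - r->tail >= RING_SIZE - 1)
                    return -1;                          /* ring full, try again later */
            *slot = (++r->head) & (RING_SIZE - 1);      /* claim the next slot */
            return 0;
    }

    static void ring_complete(struct tx_ring *r)
    {
            r->tail++;                                  /* one in-flight buffer retired */
    }

    int main(void)
    {
            struct tx_ring r = { 0, 0 };
            unsigned int slot;

            while (!ring_reserve(&r, &slot))
                    printf("reserved slot %u\n", slot);
            ring_complete(&r);                          /* frees room for one more */
            return 0;
    }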
686 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_demux_mad() local
688 int slave = -1; in mlx4_ib_demux_mad() local
702 return -EINVAL; in mlx4_ib_demux_mad()
703 if (!(wc->wc_flags & IB_WC_GRH)) { in mlx4_ib_demux_mad()
705 return -EINVAL; in mlx4_ib_demux_mad()
707 if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) { in mlx4_ib_demux_mad()
709 return -EINVAL; in mlx4_ib_demux_mad()
711 err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave); in mlx4_ib_demux_mad()
712 if (err && mlx4_is_mf_bonded(dev->dev)) { in mlx4_ib_demux_mad()
714 err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave); in mlx4_ib_demux_mad()
717 pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n", in mlx4_ib_demux_mad()
718 slave, grh->dgid.raw, port, other_port); in mlx4_ib_demux_mad()
723 return -ENOENT; in mlx4_ib_demux_mad()
725 if (slave >= dev->dev->caps.sqp_demux) { in mlx4_ib_demux_mad()
726 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", in mlx4_ib_demux_mad()
727 slave, dev->dev->caps.sqp_demux); in mlx4_ib_demux_mad()
728 return -ENOENT; in mlx4_ib_demux_mad()
734 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); in mlx4_ib_demux_mad()
736 pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n", in mlx4_ib_demux_mad()
737 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", in mlx4_ib_demux_mad()
738 slave, err); in mlx4_ib_demux_mad()
743 slave = mlx4_master_func_num(dev->dev); in mlx4_ib_demux_mad()
745 /* See if the slave id is encoded in a response mad */ in mlx4_ib_demux_mad()
746 if (mad->mad_hdr.method & 0x80) { in mlx4_ib_demux_mad()
747 slave_id = (u8 *) &mad->mad_hdr.tid; in mlx4_ib_demux_mad()
748 slave = *slave_id; in mlx4_ib_demux_mad()
749 if (slave != 255) /*255 indicates the dom0*/ in mlx4_ib_demux_mad()
754 if (wc->wc_flags & IB_WC_GRH) { in mlx4_ib_demux_mad()
755 if (grh->dgid.global.interface_id == in mlx4_ib_demux_mad()
757 grh->dgid.global.subnet_prefix == cpu_to_be64( in mlx4_ib_demux_mad()
758 atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) { in mlx4_ib_demux_mad()
759 slave = 0; in mlx4_ib_demux_mad()
761 slave = mlx4_ib_find_real_gid(ibdev, port, in mlx4_ib_demux_mad()
762 grh->dgid.global.interface_id); in mlx4_ib_demux_mad()
763 if (slave < 0) { in mlx4_ib_demux_mad()
765 return -ENOENT; in mlx4_ib_demux_mad()
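The fragments at lines 747-748 and 1527 hint at how responses are routed back: the initiating function's index is stamped into the most significant byte of the 64-bit MAD transaction ID on the way out and read back from the same byte when the response arrives. A tiny standalone illustration of that byte-level encode/decode (not the driver's API):

    #include <stdint.h>
    #include <stdio.h>

    static void tid_set_slave(uint8_t tid[8], uint8_t slave)
    {
            tid[0] = slave;    /* most significant byte of the big-endian TID */
    }

    static uint8_t tid_get_slave(const uint8_t tid[8])
    {
            return tid[0];
    }

    int main(void)
    {
            uint8_t tid[8] = { 0 };

            tid_set_slave(tid, 3);
            printf("slave recovered from TID: %u\n", tid_get_slave(tid));
            return 0;
    }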
769 /* Class-specific handling */ in mlx4_ib_demux_mad()
770 switch (mad->mad_hdr.mgmt_class) { in mlx4_ib_demux_mad()
774 if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) { in mlx4_ib_demux_mad()
775 if (!mlx4_vf_smi_enabled(dev->dev, slave, port)) in mlx4_ib_demux_mad()
776 return -EPERM; in mlx4_ib_demux_mad()
778 if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) { in mlx4_ib_demux_mad()
779 … mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n", in mlx4_ib_demux_mad()
780 slave, mad->mad_hdr.mgmt_class, in mlx4_ib_demux_mad()
781 mad->mad_hdr.method); in mlx4_ib_demux_mad()
782 return -EINVAL; in mlx4_ib_demux_mad()
787 if (mlx4_ib_demux_sa_handler(ibdev, port, slave, in mlx4_ib_demux_mad()
792 if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad)) in mlx4_ib_demux_mad()
796 if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP) in mlx4_ib_demux_mad()
801 if (slave != mlx4_master_func_num(dev->dev)) { in mlx4_ib_demux_mad()
803 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave); in mlx4_ib_demux_mad()
807 /* make sure that slave == 255 was not left unhandled */ in mlx4_ib_demux_mad()
808 if (slave >= dev->dev->caps.sqp_demux) { in mlx4_ib_demux_mad()
809 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", in mlx4_ib_demux_mad()
810 slave, dev->dev->caps.sqp_demux); in mlx4_ib_demux_mad()
811 return -ENOENT; in mlx4_ib_demux_mad()
814 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); in mlx4_ib_demux_mad()
816 pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n", in mlx4_ib_demux_mad()
817 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", in mlx4_ib_demux_mad()
818 slave, err); in mlx4_ib_demux_mad()
830 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE); in ib_process_mad()
832 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { in ib_process_mad()
837 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in ib_process_mad()
838 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { in ib_process_mad()
839 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in ib_process_mad()
840 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && in ib_process_mad()
841 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) in ib_process_mad()
845 * Don't process SMInfo queries -- the SMA can't handle them. in ib_process_mad()
847 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) in ib_process_mad()
849 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in ib_process_mad()
850 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || in ib_process_mad()
851 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 || in ib_process_mad()
852 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { in ib_process_mad()
853 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in ib_process_mad()
854 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) in ib_process_mad()
859 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in ib_process_mad()
860 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in ib_process_mad()
861 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && in ib_process_mad()
862 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && in ib_process_mad()
874 if (!out_mad->mad_hdr.status) { in ib_process_mad()
877 if (!mlx4_is_slave(to_mdev(ibdev)->dev)) in ib_process_mad()
882 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) in ib_process_mad()
883 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); in ib_process_mad()
885 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) in ib_process_mad()
901 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, in edit_counter()
902 (be64_to_cpu(cnt->tx_bytes) >> 2)); in edit_counter()
903 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, in edit_counter()
904 (be64_to_cpu(cnt->rx_bytes) >> 2)); in edit_counter()
905 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, in edit_counter()
906 be64_to_cpu(cnt->tx_frames)); in edit_counter()
907 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, in edit_counter()
908 be64_to_cpu(cnt->rx_frames)); in edit_counter()
916 pma_cnt_ext->port_xmit_data = in edit_counter()
917 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2); in edit_counter()
918 pma_cnt_ext->port_rcv_data = in edit_counter()
919 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2); in edit_counter()
920 pma_cnt_ext->port_xmit_packets = cnt->tx_frames; in edit_counter()
921 pma_cnt_ext->port_rcv_packets = cnt->rx_frames; in edit_counter()
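edit_counter() above converts raw byte counters into the units the IB PMA PortCounters attributes expect: data volume is reported in 32-bit words, hence the ">> 2", and the non-extended counters are only 32 bits wide, so ASSIGN_32BIT_COUNTER presumably clamps rather than letting the value wrap. A hedged sketch of that conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: convert a 64-bit byte count to the 32-bit,
     * dword-based PortCounters representation, saturating at the maximum. */
    static uint32_t bytes_to_pma_dwords32(uint64_t bytes)
    {
            uint64_t dwords = bytes >> 2;    /* bytes -> 32-bit words */

            return dwords > UINT32_MAX ? UINT32_MAX : (uint32_t)dwords;
    }

    int main(void)
    {
            printf("%u\n", bytes_to_pma_dwords32(40ULL * 1024 * 1024 * 1024));
            return 0;
    }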
942 struct mlx4_ib_dev *dev = to_mdev(ibdev); in iboe_process_mad() local
946 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) in iboe_process_mad()
947 return -EINVAL; in iboe_process_mad()
949 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) in iboe_process_mad()
950 return iboe_process_mad_port_info((void *)(out_mad->data + 40)); in iboe_process_mad()
953 mutex_lock(&dev->counters_table[port_num - 1].mutex); in iboe_process_mad()
955 &dev->counters_table[port_num - 1].counters_list, in iboe_process_mad()
957 err = mlx4_get_counter_stats(dev->dev, in iboe_process_mad()
958 tmp_counter->index, in iboe_process_mad()
967 mutex_unlock(&dev->counters_table[port_num - 1].mutex); in iboe_process_mad()
972 (void *)(out_mad->data + 40), in iboe_process_mad()
973 in_mad->mad_hdr.attr_id); in iboe_process_mad()
989 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_process_mad() local
992 /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA in mlx4_ib_process_mad()
996 if (mlx4_is_slave(dev->dev) && in mlx4_ib_process_mad()
997 (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && in mlx4_ib_process_mad()
998 (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || in mlx4_ib_process_mad()
999 in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || in mlx4_ib_process_mad()
1000 in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) in mlx4_ib_process_mad()
1012 return -EINVAL; in mlx4_ib_process_mad()
1018 if (mad_send_wc->send_buf->context[0]) in send_handler()
1019 rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0); in send_handler()
1020 ib_free_send_mad(mad_send_wc->send_buf); in send_handler()
1023 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) in mlx4_ib_mad_init() argument
1030 for (p = 0; p < dev->num_ports; ++p) { in mlx4_ib_mad_init()
1031 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1); in mlx4_ib_mad_init()
1034 agent = ib_register_mad_agent(&dev->ib_dev, p + 1, in mlx4_ib_mad_init()
1042 dev->send_agent[p][q] = agent; in mlx4_ib_mad_init()
1044 dev->send_agent[p][q] = NULL; in mlx4_ib_mad_init()
1051 for (p = 0; p < dev->num_ports; ++p) in mlx4_ib_mad_init()
1053 if (dev->send_agent[p][q]) in mlx4_ib_mad_init()
1054 ib_unregister_mad_agent(dev->send_agent[p][q]); in mlx4_ib_mad_init()
1059 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) in mlx4_ib_mad_cleanup() argument
1064 for (p = 0; p < dev->num_ports; ++p) { in mlx4_ib_mad_cleanup()
1066 agent = dev->send_agent[p][q]; in mlx4_ib_mad_cleanup()
1068 dev->send_agent[p][q] = NULL; in mlx4_ib_mad_cleanup()
1073 if (dev->sm_ah[p]) in mlx4_ib_mad_cleanup()
1074 rdma_destroy_ah(dev->sm_ah[p], 0); in mlx4_ib_mad_cleanup()
1078 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num) in handle_lid_change_event() argument
1080 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE); in handle_lid_change_event()
1082 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_lid_change_event()
1083 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, in handle_lid_change_event()
1087 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num) in handle_client_rereg_event() argument
1089 /* re-configure the alias-guid and mcg's */ in handle_client_rereg_event()
1090 if (mlx4_is_master(dev->dev)) { in handle_client_rereg_event()
1091 mlx4_ib_invalidate_all_guid_record(dev, port_num); in handle_client_rereg_event()
1093 if (!dev->sriov.is_going_down) { in handle_client_rereg_event()
1094 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0); in handle_client_rereg_event()
1095 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, in handle_client_rereg_event()
1101 * only if in secure-host mode (snooping is not possible) in handle_client_rereg_event()
1102 * and the sl-to-vl change event is not generated by FW. in handle_client_rereg_event()
1104 if (!mlx4_is_slave(dev->dev) && in handle_client_rereg_event()
1105 dev->dev->flags & MLX4_FLAG_SECURE_HOST && in handle_client_rereg_event()
1106 !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) { in handle_client_rereg_event()
1107 if (mlx4_is_master(dev->dev)) in handle_client_rereg_event()
1112 mlx4_ib_sl2vl_update(dev, port_num); in handle_client_rereg_event()
1114 mlx4_sched_ib_sl2vl_update_work(dev, port_num); in handle_client_rereg_event()
1116 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER); in handle_client_rereg_event()
1119 static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, in propagate_pkey_ev() argument
1122 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe), in propagate_pkey_ev()
1126 static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num, in handle_slaves_guid_change() argument
1133 if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev)) in handle_slaves_guid_change()
1149 in_mad->base_version = 1; in handle_slaves_guid_change()
1150 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; in handle_slaves_guid_change()
1151 in_mad->class_version = 1; in handle_slaves_guid_change()
1152 in_mad->method = IB_MGMT_METHOD_GET; in handle_slaves_guid_change()
1153 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in handle_slaves_guid_change()
1154 in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i); in handle_slaves_guid_change()
1156 if (mlx4_MAD_IFC(dev, in handle_slaves_guid_change()
1159 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n"); in handle_slaves_guid_change()
1163 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i, in handle_slaves_guid_change()
1165 (u8 *)(&((struct ib_smp *)out_mad)->data)); in handle_slaves_guid_change()
1166 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i, in handle_slaves_guid_change()
1168 (u8 *)(&((struct ib_smp *)out_mad)->data)); in handle_slaves_guid_change()
1180 struct mlx4_ib_dev *dev = ew->ib_dev; in handle_port_mgmt_change_event() local
1181 struct mlx4_eqe *eqe = &(ew->ib_eqe); in handle_port_mgmt_change_event()
1182 u32 port = eqe->event.port_mgmt_change.port; in handle_port_mgmt_change_event()
1187 switch (eqe->subtype) { in handle_port_mgmt_change_event()
1189 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); in handle_port_mgmt_change_event()
1191 /* Update the SM ah - This should be done before handling in handle_port_mgmt_change_event()
1194 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); in handle_port_mgmt_change_event()
1195 u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; in handle_port_mgmt_change_event()
1196 update_sm_ah(dev, port, lid, sl); in handle_port_mgmt_change_event()
1201 handle_lid_change_event(dev, port); in handle_port_mgmt_change_event()
1205 if (mlx4_is_master(dev->dev)) { in handle_port_mgmt_change_event()
1209 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) in handle_port_mgmt_change_event()
1210 err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1); in handle_port_mgmt_change_event()
1213 eqe->event.port_mgmt_change.params.port_info.gid_prefix; in handle_port_mgmt_change_event()
1220 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), in handle_port_mgmt_change_event()
1222 atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, in handle_port_mgmt_change_event()
1226 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); in handle_port_mgmt_change_event()
1228 if (mlx4_is_master(dev->dev)) in handle_port_mgmt_change_event()
1229 mlx4_gen_slaves_port_mgt_ev(dev->dev, port, in handle_port_mgmt_change_event()
1234 handle_client_rereg_event(dev, port); in handle_port_mgmt_change_event()
1238 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); in handle_port_mgmt_change_event()
1239 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_port_mgmt_change_event()
1240 propagate_pkey_ev(dev, port, eqe); in handle_port_mgmt_change_event()
1243 /* paravirtualized master's guid is guid 0 -- does not change */ in handle_port_mgmt_change_event()
1244 if (!mlx4_is_master(dev->dev)) in handle_port_mgmt_change_event()
1245 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); in handle_port_mgmt_change_event()
1247 else if (!dev->sriov.is_going_down) { in handle_port_mgmt_change_event()
1250 handle_slaves_guid_change(dev, port, tbl_block, change_bitmap); in handle_port_mgmt_change_event()
1258 if (!mlx4_is_slave(dev->dev)) { in handle_port_mgmt_change_event()
1264 eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj]; in handle_port_mgmt_change_event()
1268 atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64); in handle_port_mgmt_change_event()
1273 "Port Management Change event\n", eqe->subtype); in handle_port_mgmt_change_event()
1279 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num, in mlx4_ib_dispatch_event() argument
1284 event.device = &dev->ib_dev; in mlx4_ib_dispatch_event()
1294 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; in mlx4_ib_tunnel_comp_handler()
1295 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_tunnel_comp_handler() local
1296 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1297 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_tunnel_comp_handler()
1298 queue_work(ctx->wq, &ctx->work); in mlx4_ib_tunnel_comp_handler()
1299 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1305 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; in mlx4_ib_wire_comp_handler()
1306 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_wire_comp_handler() local
1308 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
1309 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_wire_comp_handler()
1310 queue_work(ctx->wi_wq, &ctx->work); in mlx4_ib_wire_comp_handler()
1311 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
1323 size = (tun_qp->qp->qp_type == IB_QPT_UD) ? in mlx4_ib_post_pv_qp_buf()
1326 sg_list.addr = tun_qp->ring[index].map; in mlx4_ib_post_pv_qp_buf()
1328 sg_list.lkey = ctx->pd->local_dma_lkey; in mlx4_ib_post_pv_qp_buf()
1334 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt); in mlx4_ib_post_pv_qp_buf()
1335 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, in mlx4_ib_post_pv_qp_buf()
1337 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr); in mlx4_ib_post_pv_qp_buf()
1341 int slave, struct ib_sa_mad *sa_mad) in mlx4_ib_multiplex_sa_handler() argument
1346 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) { in mlx4_ib_multiplex_sa_handler()
1348 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad); in mlx4_ib_multiplex_sa_handler()
1356 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port, in mlx4_ib_send_to_wire() argument
1374 sqp_ctx = dev->sriov.sqps[port-1]; in mlx4_ib_send_to_wire()
1377 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) in mlx4_ib_send_to_wire()
1378 return -EAGAIN; in mlx4_ib_send_to_wire()
1382 sqp = &sqp_ctx->qp[0]; in mlx4_ib_send_to_wire()
1383 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; in mlx4_ib_send_to_wire()
1386 sqp = &sqp_ctx->qp[1]; in mlx4_ib_send_to_wire()
1387 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index]; in mlx4_ib_send_to_wire()
1390 send_qp = sqp->qp; in mlx4_ib_send_to_wire()
1392 ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah); in mlx4_ib_send_to_wire()
1394 return -ENOMEM; in mlx4_ib_send_to_wire()
1396 ah->device = sqp_ctx->pd->device; in mlx4_ib_send_to_wire()
1397 ah->pd = sqp_ctx->pd; in mlx4_ib_send_to_wire()
1401 rdma_ah_retrieve_grh(attr)->sgid_index, in mlx4_ib_send_to_wire()
1406 spin_lock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1407 if (sqp->tx_ix_head - sqp->tx_ix_tail >= in mlx4_ib_send_to_wire()
1408 (MLX4_NUM_WIRE_BUFS - 1)) in mlx4_ib_send_to_wire()
1409 ret = -EAGAIN; in mlx4_ib_send_to_wire()
1411 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1); in mlx4_ib_send_to_wire()
1412 spin_unlock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1416 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); in mlx4_ib_send_to_wire()
1417 kfree(sqp->tx_ring[wire_tx_ix].ah); in mlx4_ib_send_to_wire()
1418 sqp->tx_ring[wire_tx_ix].ah = ah; in mlx4_ib_send_to_wire()
1419 ib_dma_sync_single_for_cpu(&dev->ib_dev, in mlx4_ib_send_to_wire()
1420 sqp->tx_ring[wire_tx_ix].buf.map, in mlx4_ib_send_to_wire()
1424 memcpy(&sqp_mad->payload, mad, sizeof *mad); in mlx4_ib_send_to_wire()
1426 ib_dma_sync_single_for_device(&dev->ib_dev, in mlx4_ib_send_to_wire()
1427 sqp->tx_ring[wire_tx_ix].buf.map, in mlx4_ib_send_to_wire()
1431 list.addr = sqp->tx_ring[wire_tx_ix].buf.map; in mlx4_ib_send_to_wire()
1433 list.lkey = sqp_ctx->pd->local_dma_lkey; in mlx4_ib_send_to_wire()
1451 spin_lock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1452 sqp->tx_ix_tail++; in mlx4_ib_send_to_wire()
1453 spin_unlock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1454 sqp->tx_ring[wire_tx_ix].ah = NULL; in mlx4_ib_send_to_wire()
1460 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port) in get_slave_base_gid_ix() argument
1462 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) in get_slave_base_gid_ix()
1463 return slave; in get_slave_base_gid_ix()
1464 return mlx4_get_base_gid_ix(dev->dev, slave, port); in get_slave_base_gid_ix()
1467 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port, in fill_in_real_sgid_index() argument
1471 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) in fill_in_real_sgid_index()
1472 grh->sgid_index = slave; in fill_in_real_sgid_index()
1474 grh->sgid_index += get_slave_base_gid_ix(dev, slave, port); in fill_in_real_sgid_index()
1479 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_multiplex_mad() local
1480 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; in mlx4_ib_multiplex_mad()
1481 int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1); in mlx4_ib_multiplex_mad()
1482 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr; in mlx4_ib_multiplex_mad()
1486 int slave; in mlx4_ib_multiplex_mad() local
1493 /* Get slave that sent this packet */ in mlx4_ib_multiplex_mad()
1494 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || in mlx4_ib_multiplex_mad()
1495 wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX || in mlx4_ib_multiplex_mad()
1496 (wc->src_qp & 0x1) != ctx->port - 1 || in mlx4_ib_multiplex_mad()
1497 wc->src_qp & 0x4) { in mlx4_ib_multiplex_mad()
1498 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); in mlx4_ib_multiplex_mad()
1501 slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8; in mlx4_ib_multiplex_mad()
1502 if (slave != ctx->slave) { in mlx4_ib_multiplex_mad()
1503 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " in mlx4_ib_multiplex_mad()
1504 "belongs to another slave\n", wc->src_qp); in mlx4_ib_multiplex_mad()
1509 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, in mlx4_ib_multiplex_mad()
1512 switch (tunnel->mad.mad_hdr.method) { in mlx4_ib_multiplex_mad()
1520 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid; in mlx4_ib_multiplex_mad()
1522 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " in mlx4_ib_multiplex_mad()
1523 "class:%d slave:%d\n", *slave_id, in mlx4_ib_multiplex_mad()
1524 tunnel->mad.mad_hdr.mgmt_class, slave); in mlx4_ib_multiplex_mad()
1527 *slave_id = slave; in mlx4_ib_multiplex_mad()
1533 /* Class-specific handling */ in mlx4_ib_multiplex_mad()
1534 switch (tunnel->mad.mad_hdr.mgmt_class) { in mlx4_ib_multiplex_mad()
1537 if (slave != mlx4_master_func_num(dev->dev) && in mlx4_ib_multiplex_mad()
1538 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) in mlx4_ib_multiplex_mad()
1542 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1543 (struct ib_sa_mad *) &tunnel->mad)) in mlx4_ib_multiplex_mad()
1547 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1548 (struct ib_mad *) &tunnel->mad)) in mlx4_ib_multiplex_mad()
1552 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET && in mlx4_ib_multiplex_mad()
1553 tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET) in mlx4_ib_multiplex_mad()
1558 if (slave != mlx4_master_func_num(dev->dev)) { in mlx4_ib_multiplex_mad()
1559 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " in mlx4_ib_multiplex_mad()
1560 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave); in mlx4_ib_multiplex_mad()
1567 memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av)); in mlx4_ib_multiplex_mad()
1568 ah.ibah.device = ctx->ib_dev; in mlx4_ib_multiplex_mad()
1571 port = mlx4_slave_convert_port(dev->dev, slave, port); in mlx4_ib_multiplex_mad()
1575 ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); in mlx4_ib_multiplex_mad()
1579 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); in mlx4_ib_multiplex_mad()
1582 memcpy(dmac, tunnel->hdr.mac, ETH_ALEN); in mlx4_ib_multiplex_mad()
1583 vlan_id = be16_to_cpu(tunnel->hdr.vlan); in mlx4_ib_multiplex_mad()
1584 /* if the slave has a default vlan, use it */ in mlx4_ib_multiplex_mad()
1585 if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1589 sts = mlx4_ib_send_to_wire(dev, slave, ctx->port, in mlx4_ib_multiplex_mad()
1590 is_proxy_qp0(dev, wc->src_qp, slave) ? in mlx4_ib_multiplex_mad()
1592 be16_to_cpu(tunnel->hdr.pkey_index), in mlx4_ib_multiplex_mad()
1593 be32_to_cpu(tunnel->hdr.remote_qpn), in mlx4_ib_multiplex_mad()
1594 be32_to_cpu(tunnel->hdr.qkey), in mlx4_ib_multiplex_mad()
1595 &ah_attr, wc->smac, vlan_id, &tunnel->mad); in mlx4_ib_multiplex_mad()
1597 pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n", in mlx4_ib_multiplex_mad()
1598 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", in mlx4_ib_multiplex_mad()
1599 slave, sts); in mlx4_ib_multiplex_mad()
1611 return -EINVAL; in mlx4_ib_alloc_pv_bufs()
1613 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_alloc_pv_bufs()
1615 tun_qp->ring = kcalloc(nmbr_bufs, in mlx4_ib_alloc_pv_bufs()
1618 if (!tun_qp->ring) in mlx4_ib_alloc_pv_bufs()
1619 return -ENOMEM; in mlx4_ib_alloc_pv_bufs()
1621 tun_qp->tx_ring = kcalloc(nmbr_bufs, in mlx4_ib_alloc_pv_bufs()
1624 if (!tun_qp->tx_ring) { in mlx4_ib_alloc_pv_bufs()
1625 kfree(tun_qp->ring); in mlx4_ib_alloc_pv_bufs()
1626 tun_qp->ring = NULL; in mlx4_ib_alloc_pv_bufs()
1627 return -ENOMEM; in mlx4_ib_alloc_pv_bufs()
1639 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL); in mlx4_ib_alloc_pv_bufs()
1640 if (!tun_qp->ring[i].addr) in mlx4_ib_alloc_pv_bufs()
1642 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1643 tun_qp->ring[i].addr, in mlx4_ib_alloc_pv_bufs()
1646 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { in mlx4_ib_alloc_pv_bufs()
1647 kfree(tun_qp->ring[i].addr); in mlx4_ib_alloc_pv_bufs()
1653 tun_qp->tx_ring[i].buf.addr = in mlx4_ib_alloc_pv_bufs()
1655 if (!tun_qp->tx_ring[i].buf.addr) in mlx4_ib_alloc_pv_bufs()
1657 tun_qp->tx_ring[i].buf.map = in mlx4_ib_alloc_pv_bufs()
1658 ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1659 tun_qp->tx_ring[i].buf.addr, in mlx4_ib_alloc_pv_bufs()
1662 if (ib_dma_mapping_error(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1663 tun_qp->tx_ring[i].buf.map)) { in mlx4_ib_alloc_pv_bufs()
1664 kfree(tun_qp->tx_ring[i].buf.addr); in mlx4_ib_alloc_pv_bufs()
1667 tun_qp->tx_ring[i].ah = NULL; in mlx4_ib_alloc_pv_bufs()
1669 spin_lock_init(&tun_qp->tx_lock); in mlx4_ib_alloc_pv_bufs()
1670 tun_qp->tx_ix_head = 0; in mlx4_ib_alloc_pv_bufs()
1671 tun_qp->tx_ix_tail = 0; in mlx4_ib_alloc_pv_bufs()
1672 tun_qp->proxy_qpt = qp_type; in mlx4_ib_alloc_pv_bufs()
1678 --i; in mlx4_ib_alloc_pv_bufs()
1679 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_alloc_pv_bufs()
1681 kfree(tun_qp->tx_ring[i].buf.addr); in mlx4_ib_alloc_pv_bufs()
1686 --i; in mlx4_ib_alloc_pv_bufs()
1687 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_alloc_pv_bufs()
1689 kfree(tun_qp->ring[i].addr); in mlx4_ib_alloc_pv_bufs()
1691 kfree(tun_qp->tx_ring); in mlx4_ib_alloc_pv_bufs()
1692 tun_qp->tx_ring = NULL; in mlx4_ib_alloc_pv_bufs()
1693 kfree(tun_qp->ring); in mlx4_ib_alloc_pv_bufs()
1694 tun_qp->ring = NULL; in mlx4_ib_alloc_pv_bufs()
1695 return -ENOMEM; in mlx4_ib_alloc_pv_bufs()
1709 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_free_pv_qp_bufs()
1720 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_free_pv_qp_bufs()
1722 kfree(tun_qp->ring[i].addr); in mlx4_ib_free_pv_qp_bufs()
1726 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_free_pv_qp_bufs()
1728 kfree(tun_qp->tx_ring[i].buf.addr); in mlx4_ib_free_pv_qp_bufs()
1729 if (tun_qp->tx_ring[i].ah) in mlx4_ib_free_pv_qp_bufs()
1730 rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0); in mlx4_ib_free_pv_qp_bufs()
1732 kfree(tun_qp->tx_ring); in mlx4_ib_free_pv_qp_bufs()
1733 kfree(tun_qp->ring); in mlx4_ib_free_pv_qp_bufs()
1743 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_tunnel_comp_worker()
1745 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_tunnel_comp_worker()
1746 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_tunnel_comp_worker()
1753 (MLX4_NUM_TUNNEL_BUFS - 1)); in mlx4_ib_tunnel_comp_worker()
1759 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id & in mlx4_ib_tunnel_comp_worker()
1760 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); in mlx4_ib_tunnel_comp_worker()
1761 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah in mlx4_ib_tunnel_comp_worker()
1763 spin_lock(&tun_qp->tx_lock); in mlx4_ib_tunnel_comp_worker()
1764 tun_qp->tx_ix_tail++; in mlx4_ib_tunnel_comp_worker()
1765 spin_unlock(&tun_qp->tx_lock); in mlx4_ib_tunnel_comp_worker()
1774 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_tunnel_comp_worker()
1776 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id & in mlx4_ib_tunnel_comp_worker()
1777 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); in mlx4_ib_tunnel_comp_worker()
1778 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah in mlx4_ib_tunnel_comp_worker()
1780 spin_lock(&tun_qp->tx_lock); in mlx4_ib_tunnel_comp_worker()
1781 tun_qp->tx_ix_tail++; in mlx4_ib_tunnel_comp_worker()
1782 spin_unlock(&tun_qp->tx_lock); in mlx4_ib_tunnel_comp_worker()
1794 event->event, sqp->port); in pv_qp_event_handler()
1808 return -EINVAL; in create_pv_sqp()
1810 tun_qp = &ctx->qp[qp_type]; in create_pv_sqp()
1813 qp_init_attr.init_attr.send_cq = ctx->cq; in create_pv_sqp()
1814 qp_init_attr.init_attr.recv_cq = ctx->cq; in create_pv_sqp()
1823 qp_init_attr.port = ctx->port; in create_pv_sqp()
1824 qp_init_attr.slave = ctx->slave; in create_pv_sqp()
1833 qp_init_attr.init_attr.port_num = ctx->port; in create_pv_sqp()
1836 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); in create_pv_sqp()
1837 if (IS_ERR(tun_qp->qp)) { in create_pv_sqp()
1838 ret = PTR_ERR(tun_qp->qp); in create_pv_sqp()
1839 tun_qp->qp = NULL; in create_pv_sqp()
1849 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, in create_pv_sqp()
1850 ctx->port, IB_DEFAULT_PKEY_FULL, in create_pv_sqp()
1854 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; in create_pv_sqp()
1856 attr.port_num = ctx->port; in create_pv_sqp()
1857 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); in create_pv_sqp()
1864 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); in create_pv_sqp()
1872 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); in create_pv_sqp()
1890 ib_destroy_qp(tun_qp->qp); in create_pv_sqp()
1891 tun_qp->qp = NULL; in create_pv_sqp()
1907 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_sqp_comp_worker()
1909 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_sqp_comp_worker()
1910 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_sqp_comp_worker()
1914 kfree(sqp->tx_ring[wc.wr_id & in mlx4_ib_sqp_comp_worker()
1915 (MLX4_NUM_WIRE_BUFS - 1)].ah); in mlx4_ib_sqp_comp_worker()
1916 sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah in mlx4_ib_sqp_comp_worker()
1918 spin_lock(&sqp->tx_lock); in mlx4_ib_sqp_comp_worker()
1919 sqp->tx_ix_tail++; in mlx4_ib_sqp_comp_worker()
1920 spin_unlock(&sqp->tx_lock); in mlx4_ib_sqp_comp_worker()
1924 (sqp->ring[wc.wr_id & in mlx4_ib_sqp_comp_worker()
1925 (MLX4_NUM_WIRE_BUFS - 1)].addr))->payload); in mlx4_ib_sqp_comp_worker()
1927 (sqp->ring[wc.wr_id & in mlx4_ib_sqp_comp_worker()
1928 (MLX4_NUM_WIRE_BUFS - 1)].addr))->grh); in mlx4_ib_sqp_comp_worker()
1929 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); in mlx4_ib_sqp_comp_worker()
1931 (MLX4_NUM_WIRE_BUFS - 1))) in mlx4_ib_sqp_comp_worker()
1941 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_sqp_comp_worker()
1943 kfree(sqp->tx_ring[wc.wr_id & in mlx4_ib_sqp_comp_worker()
1944 (MLX4_NUM_WIRE_BUFS - 1)].ah); in mlx4_ib_sqp_comp_worker()
1945 sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah in mlx4_ib_sqp_comp_worker()
1947 spin_lock(&sqp->tx_lock); in mlx4_ib_sqp_comp_worker()
1948 sqp->tx_ix_tail++; in mlx4_ib_sqp_comp_worker()
1949 spin_unlock(&sqp->tx_lock); in mlx4_ib_sqp_comp_worker()
1955 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port, in alloc_pv_object() argument
1963 return -ENOMEM; in alloc_pv_object()
1965 ctx->ib_dev = &dev->ib_dev; in alloc_pv_object()
1966 ctx->port = port; in alloc_pv_object()
1967 ctx->slave = slave; in alloc_pv_object()
1972 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port) in free_pv_object() argument
1974 if (dev->sriov.demux[port - 1].tun[slave]) { in free_pv_object()
1975 kfree(dev->sriov.demux[port - 1].tun[slave]); in free_pv_object()
1976 dev->sriov.demux[port - 1].tun[slave] = NULL; in free_pv_object()
1980 static int create_pv_resources(struct ib_device *ibdev, int slave, int port, in create_pv_resources() argument
1987 if (ctx->state != DEMUX_PV_STATE_DOWN) in create_pv_resources()
1988 return -EEXIST; in create_pv_resources()
1990 ctx->state = DEMUX_PV_STATE_STARTING; in create_pv_resources()
1992 if (rdma_port_get_link_layer(ibdev, ctx->port) == in create_pv_resources()
1994 ctx->has_smi = 1; in create_pv_resources()
1996 if (ctx->has_smi) { in create_pv_resources()
2011 if (ctx->has_smi) in create_pv_resources()
2015 ctx->cq = ib_create_cq(ctx->ib_dev, in create_pv_resources()
2018 if (IS_ERR(ctx->cq)) { in create_pv_resources()
2019 ret = PTR_ERR(ctx->cq); in create_pv_resources()
2024 ctx->pd = ib_alloc_pd(ctx->ib_dev, 0); in create_pv_resources()
2025 if (IS_ERR(ctx->pd)) { in create_pv_resources()
2026 ret = PTR_ERR(ctx->pd); in create_pv_resources()
2031 if (ctx->has_smi) { in create_pv_resources()
2048 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); in create_pv_resources()
2050 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); in create_pv_resources()
2052 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources()
2053 ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; in create_pv_resources()
2055 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in create_pv_resources()
2060 ctx->state = DEMUX_PV_STATE_ACTIVE; in create_pv_resources()
2064 ctx->wq = NULL; in create_pv_resources()
2065 ib_destroy_qp(ctx->qp[1].qp); in create_pv_resources()
2066 ctx->qp[1].qp = NULL; in create_pv_resources()
2070 if (ctx->has_smi) in create_pv_resources()
2071 ib_destroy_qp(ctx->qp[0].qp); in create_pv_resources()
2072 ctx->qp[0].qp = NULL; in create_pv_resources()
2075 ib_dealloc_pd(ctx->pd); in create_pv_resources()
2076 ctx->pd = NULL; in create_pv_resources()
2079 ib_destroy_cq(ctx->cq); in create_pv_resources()
2080 ctx->cq = NULL; in create_pv_resources()
2086 if (ctx->has_smi) in create_pv_resources()
2089 ctx->state = DEMUX_PV_STATE_DOWN; in create_pv_resources()
2093 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, in destroy_pv_resources() argument
2098 if (ctx->state > DEMUX_PV_STATE_DOWN) { in destroy_pv_resources()
2099 ctx->state = DEMUX_PV_STATE_DOWNING; in destroy_pv_resources()
2101 flush_workqueue(ctx->wq); in destroy_pv_resources()
2102 if (ctx->has_smi) { in destroy_pv_resources()
2103 ib_destroy_qp(ctx->qp[0].qp); in destroy_pv_resources()
2104 ctx->qp[0].qp = NULL; in destroy_pv_resources()
2107 ib_destroy_qp(ctx->qp[1].qp); in destroy_pv_resources()
2108 ctx->qp[1].qp = NULL; in destroy_pv_resources()
2110 ib_dealloc_pd(ctx->pd); in destroy_pv_resources()
2111 ctx->pd = NULL; in destroy_pv_resources()
2112 ib_destroy_cq(ctx->cq); in destroy_pv_resources()
2113 ctx->cq = NULL; in destroy_pv_resources()
2114 ctx->state = DEMUX_PV_STATE_DOWN; in destroy_pv_resources()
2118 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave, in mlx4_ib_tunnels_update() argument
2124 clean_vf_mcast(&dev->sriov.demux[port - 1], slave); in mlx4_ib_tunnels_update()
2126 if (slave == mlx4_master_func_num(dev->dev)) in mlx4_ib_tunnels_update()
2127 destroy_pv_resources(dev, slave, port, in mlx4_ib_tunnels_update()
2128 dev->sriov.sqps[port - 1], 1); in mlx4_ib_tunnels_update()
2130 destroy_pv_resources(dev, slave, port, in mlx4_ib_tunnels_update()
2131 dev->sriov.demux[port - 1].tun[slave], 1); in mlx4_ib_tunnels_update()
2136 ret = create_pv_resources(&dev->ib_dev, slave, port, 1, in mlx4_ib_tunnels_update()
2137 dev->sriov.demux[port - 1].tun[slave]); in mlx4_ib_tunnels_update()
2140 if (!ret && slave == mlx4_master_func_num(dev->dev)) in mlx4_ib_tunnels_update()
2141 ret = create_pv_resources(&dev->ib_dev, slave, port, 0, in mlx4_ib_tunnels_update()
2142 dev->sriov.sqps[port - 1]); in mlx4_ib_tunnels_update()
2151 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port, in mlx4_ib_tunnels_update_work()
2152 dmxw->do_init); in mlx4_ib_tunnels_update_work()
2157 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, in mlx4_ib_alloc_demux_ctx() argument
2165 ctx->tun = kcalloc(dev->dev->caps.sqp_demux, in mlx4_ib_alloc_demux_ctx()
2167 if (!ctx->tun) in mlx4_ib_alloc_demux_ctx()
2168 return -ENOMEM; in mlx4_ib_alloc_demux_ctx()
2170 ctx->dev = dev; in mlx4_ib_alloc_demux_ctx()
2171 ctx->port = port; in mlx4_ib_alloc_demux_ctx()
2172 ctx->ib_dev = &dev->ib_dev; in mlx4_ib_alloc_demux_ctx()
2175 i < min(dev->dev->caps.sqp_demux, in mlx4_ib_alloc_demux_ctx()
2176 (u16)(dev->dev->persist->num_vfs + 1)); in mlx4_ib_alloc_demux_ctx()
2179 mlx4_get_active_ports(dev->dev, i); in mlx4_ib_alloc_demux_ctx()
2181 if (!test_bit(port - 1, actv_ports.ports)) in mlx4_ib_alloc_demux_ctx()
2184 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); in mlx4_ib_alloc_demux_ctx()
2186 ret = -ENOMEM; in mlx4_ib_alloc_demux_ctx()
2193 pr_err("Failed initializing mcg para-virt (%d)\n", ret); in mlx4_ib_alloc_demux_ctx()
2198 ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in mlx4_ib_alloc_demux_ctx()
2199 if (!ctx->wq) { in mlx4_ib_alloc_demux_ctx()
2201 ret = -ENOMEM; in mlx4_ib_alloc_demux_ctx()
2206 ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in mlx4_ib_alloc_demux_ctx()
2207 if (!ctx->wi_wq) { in mlx4_ib_alloc_demux_ctx()
2209 ret = -ENOMEM; in mlx4_ib_alloc_demux_ctx()
2214 ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in mlx4_ib_alloc_demux_ctx()
2215 if (!ctx->ud_wq) { in mlx4_ib_alloc_demux_ctx()
2217 ret = -ENOMEM; in mlx4_ib_alloc_demux_ctx()
2224 destroy_workqueue(ctx->wi_wq); in mlx4_ib_alloc_demux_ctx()
2225 ctx->wi_wq = NULL; in mlx4_ib_alloc_demux_ctx()
2228 destroy_workqueue(ctx->wq); in mlx4_ib_alloc_demux_ctx()
2229 ctx->wq = NULL; in mlx4_ib_alloc_demux_ctx()
2234 for (i = 0; i < dev->dev->caps.sqp_demux; i++) in mlx4_ib_alloc_demux_ctx()
2235 free_pv_object(dev, i, port); in mlx4_ib_alloc_demux_ctx()
2236 kfree(ctx->tun); in mlx4_ib_alloc_demux_ctx()
2237 ctx->tun = NULL; in mlx4_ib_alloc_demux_ctx()
2243 if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) { in mlx4_ib_free_sqp_ctx()
2244 sqp_ctx->state = DEMUX_PV_STATE_DOWNING; in mlx4_ib_free_sqp_ctx()
2245 flush_workqueue(sqp_ctx->wq); in mlx4_ib_free_sqp_ctx()
2246 if (sqp_ctx->has_smi) { in mlx4_ib_free_sqp_ctx()
2247 ib_destroy_qp(sqp_ctx->qp[0].qp); in mlx4_ib_free_sqp_ctx()
2248 sqp_ctx->qp[0].qp = NULL; in mlx4_ib_free_sqp_ctx()
2251 ib_destroy_qp(sqp_ctx->qp[1].qp); in mlx4_ib_free_sqp_ctx()
2252 sqp_ctx->qp[1].qp = NULL; in mlx4_ib_free_sqp_ctx()
2254 ib_dealloc_pd(sqp_ctx->pd); in mlx4_ib_free_sqp_ctx()
2255 sqp_ctx->pd = NULL; in mlx4_ib_free_sqp_ctx()
2256 ib_destroy_cq(sqp_ctx->cq); in mlx4_ib_free_sqp_ctx()
2257 sqp_ctx->cq = NULL; in mlx4_ib_free_sqp_ctx()
2258 sqp_ctx->state = DEMUX_PV_STATE_DOWN; in mlx4_ib_free_sqp_ctx()
2266 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_free_demux_ctx() local
2268 for (i = 0; i < dev->dev->caps.sqp_demux; i++) { in mlx4_ib_free_demux_ctx()
2269 if (!ctx->tun[i]) in mlx4_ib_free_demux_ctx()
2271 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) in mlx4_ib_free_demux_ctx()
2272 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; in mlx4_ib_free_demux_ctx()
2274 flush_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()
2275 flush_workqueue(ctx->wi_wq); in mlx4_ib_free_demux_ctx()
2276 for (i = 0; i < dev->dev->caps.sqp_demux; i++) { in mlx4_ib_free_demux_ctx()
2277 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); in mlx4_ib_free_demux_ctx()
2278 free_pv_object(dev, i, ctx->port); in mlx4_ib_free_demux_ctx()
2280 kfree(ctx->tun); in mlx4_ib_free_demux_ctx()
2281 destroy_workqueue(ctx->ud_wq); in mlx4_ib_free_demux_ctx()
2282 destroy_workqueue(ctx->wi_wq); in mlx4_ib_free_demux_ctx()
2283 destroy_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()
2287 static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init) in mlx4_ib_master_tunnels() argument
2291 if (!mlx4_is_master(dev->dev)) in mlx4_ib_master_tunnels()
2294 for (i = 0; i < dev->dev->caps.num_ports; i++) in mlx4_ib_master_tunnels()
2295 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init); in mlx4_ib_master_tunnels()
2299 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) in mlx4_ib_init_sriov() argument
2304 if (!mlx4_is_mfunc(dev->dev)) in mlx4_ib_init_sriov()
2307 dev->sriov.is_going_down = 0; in mlx4_ib_init_sriov()
2308 spin_lock_init(&dev->sriov.going_down_lock); in mlx4_ib_init_sriov()
2309 mlx4_ib_cm_paravirt_init(dev); in mlx4_ib_init_sriov()
2311 mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n"); in mlx4_ib_init_sriov()
2313 if (mlx4_is_slave(dev->dev)) { in mlx4_ib_init_sriov()
2314 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n"); in mlx4_ib_init_sriov()
2318 for (i = 0; i < dev->dev->caps.sqp_demux; i++) { in mlx4_ib_init_sriov()
2319 if (i == mlx4_master_func_num(dev->dev)) in mlx4_ib_init_sriov()
2320 mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid); in mlx4_ib_init_sriov()
2322 mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid()); in mlx4_ib_init_sriov()
2325 err = mlx4_ib_init_alias_guid_service(dev); in mlx4_ib_init_sriov()
2327 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n"); in mlx4_ib_init_sriov()
2330 err = mlx4_ib_device_register_sysfs(dev); in mlx4_ib_init_sriov()
2332 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n"); in mlx4_ib_init_sriov()
2336 mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n", in mlx4_ib_init_sriov()
2337 dev->dev->caps.sqp_demux); in mlx4_ib_init_sriov()
2338 for (i = 0; i < dev->num_ports; i++) { in mlx4_ib_init_sriov()
2340 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1); in mlx4_ib_init_sriov()
2343 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; in mlx4_ib_init_sriov()
2344 atomic64_set(&dev->sriov.demux[i].subnet_prefix, in mlx4_ib_init_sriov()
2346 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, in mlx4_ib_init_sriov()
2347 &dev->sriov.sqps[i]); in mlx4_ib_init_sriov()
2350 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); in mlx4_ib_init_sriov()
2354 mlx4_ib_master_tunnels(dev, 1); in mlx4_ib_init_sriov()
2358 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); in mlx4_ib_init_sriov()
2360 while (--i >= 0) { in mlx4_ib_init_sriov()
2361 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); in mlx4_ib_init_sriov()
2362 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_init_sriov()
2364 mlx4_ib_device_unregister_sysfs(dev); in mlx4_ib_init_sriov()
2367 mlx4_ib_destroy_alias_guid_service(dev); in mlx4_ib_init_sriov()
2370 mlx4_ib_cm_paravirt_clean(dev, -1); in mlx4_ib_init_sriov()
2375 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev) in mlx4_ib_close_sriov() argument
2380 if (!mlx4_is_mfunc(dev->dev)) in mlx4_ib_close_sriov()
2383 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2384 dev->sriov.is_going_down = 1; in mlx4_ib_close_sriov()
2385 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2386 if (mlx4_is_master(dev->dev)) { in mlx4_ib_close_sriov()
2387 for (i = 0; i < dev->num_ports; i++) { in mlx4_ib_close_sriov()
2388 flush_workqueue(dev->sriov.demux[i].ud_wq); in mlx4_ib_close_sriov()
2389 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2390 kfree(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2391 dev->sriov.sqps[i] = NULL; in mlx4_ib_close_sriov()
2392 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_close_sriov()
2395 mlx4_ib_cm_paravirt_clean(dev, -1); in mlx4_ib_close_sriov()
2396 mlx4_ib_destroy_alias_guid_service(dev); in mlx4_ib_close_sriov()
2397 mlx4_ib_device_unregister_sysfs(dev); in mlx4_ib_close_sriov()