qp.c: changes from commit c1395a2a8c01e8a919e47d64eb3d23d00e824b8b (old) to commit 6aec21f6a8322fa8d43df3ea7f051dfd8967f1b9 (new). Added lines are marked "+", removed lines "-"; unmarked lines are unchanged context.

/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 862 unchanged lines hidden ---

        struct mlx5_ib_resources *devr = &dev->devr;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
        struct mlx5_general_caps *gen;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;

+       mlx5_ib_odp_create_qp(qp);
+
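The hunk above adds an on-demand-paging (ODP) hook at QP creation time, before the rest of the QP is set up, presumably so that per-QP page-fault state is in place before the QP can take any faults (the hunk continues after the sketch). What follows is only an illustration of the kind of bookkeeping such a hook could initialize; the struct, its fields, and the function names are hypothetical and are not taken from the mlx5 driver. Later sketches in this file reuse the same hypothetical state.

#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical per-QP page-fault bookkeeping. */
struct example_pagefault_state {
        spinlock_t        lock;       /* protects disabled and in_flight */
        bool              disabled;   /* true: new faults must not touch this QP */
        unsigned int      in_flight;  /* fault handlers currently running */
        wait_queue_head_t drain_wq;   /* woken when in_flight drops to zero */
};

/* Called once when the QP is created, before it can take any faults. */
static void example_pagefault_state_init(struct example_pagefault_state *st)
{
        spin_lock_init(&st->lock);
        st->disabled = false;
        st->in_flight = 0;
        init_waitqueue_head(&st->drain_wq);
}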
        gen = &dev->mdev->caps.gen;
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");

--- 268 unchanged lines hidden ---

{
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_modify_qp_mbox_in *in;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;
-       if (qp->state != IB_QPS_RESET)
+       if (qp->state != IB_QPS_RESET) {
+               mlx5_ib_qp_disable_pagefaults(qp);
                if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
                                        MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
+       }
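With the braces added above, the QP teardown path now disables page-fault handling before the hardware QP is moved to RESET, so that no page-fault handler should still be operating on a QP while it is being destroyed. For such a disable to be meaningful, the fault-handling side has to check the flag and account for handlers that are already running. Continuing the hypothetical state from the previous sketch (this is not the driver's real code), the handler side could look like this:

/* A fault handler enters only while handling is enabled, and accounts
 * itself so a concurrent disable can wait for it to finish. */
static bool example_pagefault_enter(struct example_pagefault_state *st)
{
        bool ok;

        spin_lock(&st->lock);
        ok = !st->disabled;
        if (ok)
                st->in_flight++;
        spin_unlock(&st->lock);
        return ok;              /* false: drop the fault, the QP is going away */
}

static void example_pagefault_exit(struct example_pagefault_state *st)
{
        spin_lock(&st->lock);
        if (--st->in_flight == 0)
                wake_up(&st->drain_wq);
        spin_unlock(&st->lock);
}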

        get_cqs(qp, &send_cq, &recv_cq);

        if (qp->create_type == MLX5_QP_KERNEL) {
                mlx5_ib_lock_cqs(send_cq, recv_cq);
                __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)

--- 531 unchanged lines hidden ---


        mlx5_cur = to_mlx5_state(cur_state);
        mlx5_new = to_mlx5_state(new_state);
        mlx5_st = to_mlx5_st(ibqp->qp_type);
        if (mlx5_st < 0)
                goto out;

+       /* If moving to a reset or error state, we must disable page faults on
+        * this QP and flush all current page faults. Otherwise a stale page
+        * fault may attempt to work on this QP after it is reset and moved
+        * again to RTS, and may cause the driver and the device to get out of
+        * sync. */
+       if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
+           (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+               mlx5_ib_qp_disable_pagefaults(qp);
+
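The comment in the hunk above states the key invariant: before the QP moves to RESET or ERR, page-fault handling must be disabled and all in-flight faults flushed, otherwise a stale fault could be processed after the QP has been reset and brought back to RTS, leaving driver and device out of sync. A disable-and-drain operation with those semantics could be sketched as below; this again builds on the hypothetical state from the earlier sketches and is not the actual mlx5_ib_qp_disable_pagefaults() implementation.

/* Hypothetical "disable and drain": refuse new fault work, then wait for
 * every handler that already entered to finish. Must be called from a
 * context that may sleep. */
static void example_pagefaults_disable(struct example_pagefault_state *st)
{
        spin_lock(&st->lock);
        st->disabled = true;
        spin_unlock(&st->lock);

        /* No new handlers can enter; wait for the ones already running. */
        wait_event(st->drain_wq, st->in_flight == 0);
}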
        optpar = ib_mask_to_mlx5_opt(attr_mask);
        optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
        in->optparam = cpu_to_be32(optpar);
        err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
                                  to_mlx5_state(new_state), in, sqd_event,
                                  &qp->mqp);
        if (err)
                goto out;

+       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+               mlx5_ib_qp_enable_pagefaults(qp);
+
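Conversely, the hunk above re-enables page faults only on the RESET-to-INIT transition, and only after mlx5_core_qp_modify() has succeeded, so a failed transition leaves the QP with fault handling still disabled. In terms of the hypothetical state used in the sketches above, the enable side is just clearing the flag:

/* Hypothetical counterpart of the disable sketch: allow new fault
 * handlers to enter again once the QP has left the RESET state. */
static void example_pagefaults_enable(struct example_pagefault_state *st)
{
        spin_lock(&st->lock);
        st->disabled = false;
        spin_unlock(&st->lock);
}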
        qp->state = new_state;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;

--- 1289 unchanged lines hidden ---

{
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_query_qp_mbox_out *outb;
        struct mlx5_qp_context *context;
        int mlx5_state;
        int err = 0;

+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       /*
+        * Wait for any outstanding page faults, in case the user frees memory
+        * based upon this query's result.
+        */
+       flush_workqueue(mlx5_ib_page_fault_wq);
+#endif
+
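The new CONFIG_INFINIBAND_ON_DEMAND_PAGING block above flushes the driver's page-fault workqueue before the QP is queried, so that any fault work already queued completes before the result is returned; otherwise userspace could free memory based on the answer while a deferred fault handler still references it. flush_workqueue() is the standard kernel primitive for this. The snippet below shows the general pattern with hypothetical names standing in for mlx5_ib_page_fault_wq and its work items; it is a sketch, not the driver's code.

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical stand-ins for the page-fault workqueue and its work items. */
static struct workqueue_struct *example_fault_wq;

static void example_fault_fn(struct work_struct *work)
{
        /* Resolve one page fault: pin pages, update translations, resume HW. */
}

static DECLARE_WORK(example_fault_work, example_fault_fn);

static int example_init(void)
{
        example_fault_wq = alloc_ordered_workqueue("example_fault_wq", 0);
        return example_fault_wq ? 0 : -ENOMEM;
}

/* Fault reports are deferred to the workqueue. */
static void example_report_fault(void)
{
        queue_work(example_fault_wq, &example_fault_work);
}

static int example_query_qp(void)
{
        /*
         * Wait for all fault work queued so far; after this, userspace may
         * act on the answer (e.g. free memory) without racing a handler
         * that still references it.
         */
        flush_workqueue(example_fault_wq);

        /* ... collect and return the QP state ... */
        return 0;
}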
        mutex_lock(&qp->mutex);
        outb = kzalloc(sizeof(*outb), GFP_KERNEL);
        if (!outb) {
                err = -ENOMEM;
                goto out;
        }
        context = &outb->ctx;
        err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));

--- 116 unchanged lines hidden ---