Lines matching refs:qp (mlx4 InfiniBand driver QP code)

107 	struct mlx4_qp *qp;  member
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
138 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
147 return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); in is_sqp()
151 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
158 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
159 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
165 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { in is_qp0()
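
The is_sqp()/is_qp0() tests above boil down to QPN range checks against the firmware-reported base special QPN (base_sqpn + 0/1 for the per-port QP0s, + 2/3 for the QP1s), with the SR-IOV path comparing against the per-port proxy QPNs instead. A minimal standalone sketch of the native-mode range test, using hypothetical, cut-down types rather than the driver's own:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, cut-down view of the capability field used above. */
    struct phys_caps {
        uint32_t base_sqpn;
    };

    /* QP0/QP1 occupy four consecutive QPNs starting at base_sqpn:
     * base_sqpn + 0/1 are the per-port QP0s, + 2/3 the per-port QP1s. */
    static bool is_sqp(const struct phys_caps *caps, uint32_t qpn)
    {
        return qpn >= caps->base_sqpn && qpn <= caps->base_sqpn + 3;
    }

    static bool is_qp0(const struct phys_caps *caps, uint32_t qpn)
    {
        return qpn >= caps->base_sqpn && qpn <= caps->base_sqpn + 1;
    }

    int main(void)
    {
        struct phys_caps caps = { .base_sqpn = 0x40 };

        printf("qpn 0x41: sqp=%d qp0=%d\n", is_sqp(&caps, 0x41), is_qp0(&caps, 0x41));
        printf("qpn 0x43: sqp=%d qp0=%d\n", is_sqp(&caps, 0x43), is_qp0(&caps, 0x43));
        return 0;
    }
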
174 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
176 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
179 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
181 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
184 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
186 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
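
get_recv_wqe() and get_send_wqe() address the shared QP buffer purely with shift arithmetic: each work queue records its byte offset within the buffer and a power-of-two WQE stride (wqe_shift), so entry n lives at offset + (n << wqe_shift). A small userspace sketch of the same addressing, with hypothetical names and plain malloc in place of mlx4_buf_offset():

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical, simplified queue descriptor: one contiguous buffer holds
     * both queues; each queue records its byte offset and log2 WQE stride. */
    struct wq {
        uint32_t offset;     /* byte offset of this queue inside the buffer */
        uint32_t wqe_shift;  /* log2 of the WQE size in bytes */
    };

    static void *get_wqe(void *buf, const struct wq *wq, unsigned int n)
    {
        /* entry n starts at queue offset + n * (1 << wqe_shift) */
        return (char *)buf + wq->offset + ((size_t)n << wq->wqe_shift);
    }

    int main(void)
    {
        struct wq rq = { .offset = 0,    .wqe_shift = 6 };  /* 64-byte RQ WQEs  */
        struct wq sq = { .offset = 4096, .wqe_shift = 7 };  /* 128-byte SQ WQEs */
        void *buf = calloc(1, 16384);

        void *rwqe3 = get_wqe(buf, &rq, 3);   /* buf + 0    + 3 * 64  */
        void *swqe5 = get_wqe(buf, &sq, 5);   /* buf + 4096 + 5 * 128 */
        (void)rwqe3; (void)swqe5;
        free(buf);
        return 0;
    }
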
194 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) in stamp_send_wqe() argument
202 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx4_ib_handle_qp_event()
219 event.element.qp = ibqp; in mlx4_ib_handle_qp_event()
248 qpe_work->type, qpe_work->qp->qpn); in mlx4_ib_handle_qp_event()
255 mlx4_put_qp(qpe_work->qp); in mlx4_ib_handle_qp_event()
259 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
265 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
274 qpe_work->qp = qp; in mlx4_ib_qp_event()
281 mlx4_put_qp(qp); in mlx4_ib_qp_event()
284 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_wq_event() argument
287 type, qp->qpn); in mlx4_ib_wq_event()
337 bool is_user, bool has_rq, struct mlx4_ib_qp *qp, in set_rq_size() argument
349 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
360 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
361 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
362 wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); in set_rq_size()
363 qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); in set_rq_size()
368 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
369 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
371 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
372 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
373 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
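
set_rq_size() rounds the requested receive WR and SGE counts up to powers of two, sizes the receive WQE as max_gs scatter segments (16 bytes each) or the inline-receive size, whichever is larger, and hands the resulting limits back through cap. A standalone sketch of that arithmetic under those assumptions, without the device-capability clamping the driver also applies:

    #include <stdint.h>
    #include <stdio.h>

    struct mlx4_wqe_data_seg { uint32_t byte_count, lkey; uint64_t addr; };

    static uint32_t roundup_pow_of_two(uint32_t v)
    {
        uint32_t r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    static uint32_t ilog2_u32(uint32_t v)      /* floor(log2(v)) */
    {
        uint32_t l = 0;
        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        uint32_t max_recv_wr = 100, max_recv_sge = 3, inl_recv_sz = 0;

        uint32_t wqe_cnt  = roundup_pow_of_two(max_recv_wr ? max_recv_wr : 1);
        uint32_t max_gs   = roundup_pow_of_two(max_recv_sge ? max_recv_sge : 1);
        uint32_t wqe_size = max_gs * sizeof(struct mlx4_wqe_data_seg);
        uint32_t wqe_shift =
            ilog2_u32(wqe_size > inl_recv_sz ? wqe_size : inl_recv_sz);

        /* 100 -> 128 WQEs, 3 -> 4 scatter entries, 4 * 16 = 64-byte stride */
        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u\n", wqe_cnt, max_gs, wqe_shift);
        return 0;
    }
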
382 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
389 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
404 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
409 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
415 qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); in set_kernel_sq_size()
416 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + in set_kernel_sq_size()
417 qp->sq_spare_wqes); in set_kernel_sq_size()
419 qp->sq.max_gs = in set_kernel_sq_size()
421 (1 << qp->sq.wqe_shift)) - in set_kernel_sq_size()
422 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
425 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
426 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
427 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
428 qp->rq.offset = 0; in set_kernel_sq_size()
429 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
431 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
432 qp->sq.offset = 0; in set_kernel_sq_size()
435 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
436 qp->sq.wqe_cnt - qp->sq_spare_wqes; in set_kernel_sq_size()
437 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
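
set_kernel_sq_size() then packs both queues into one buffer: buf_size is rq_cnt << rq_shift plus sq_cnt << sq_shift, and whichever queue has the larger stride is placed first (the wqe_shift comparison above). A compact sketch of that layout decision, with hypothetical field names and none of the HCA limits:

    #include <stdint.h>
    #include <stdio.h>

    struct layout {
        uint32_t buf_size;
        uint32_t rq_offset;
        uint32_t sq_offset;
    };

    static struct layout qp_buf_layout(uint32_t rq_cnt, uint32_t rq_shift,
                                       uint32_t sq_cnt, uint32_t sq_shift)
    {
        struct layout l;

        l.buf_size = (rq_cnt << rq_shift) + (sq_cnt << sq_shift);
        if (rq_shift > sq_shift) {
            /* larger-stride RQ first, SQ follows it */
            l.rq_offset = 0;
            l.sq_offset = rq_cnt << rq_shift;
        } else {
            /* otherwise SQ first, RQ follows it */
            l.rq_offset = sq_cnt << sq_shift;
            l.sq_offset = 0;
        }
        return l;
    }

    int main(void)
    {
        struct layout l = qp_buf_layout(128, 6, 256, 7);

        printf("size=%u rq_off=%u sq_off=%u\n", l.buf_size, l.rq_offset, l.sq_offset);
        return 0;
    }
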
447 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
461 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
462 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
464 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
465 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
470 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
474 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
475 kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), in alloc_proxy_bufs()
477 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
479 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
480 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
483 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
485 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
486 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
489 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
490 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
499 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
502 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
504 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
505 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
509 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
513 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
514 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
517 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
519 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
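
alloc_proxy_bufs() follows the usual allocate-all-or-unwind pattern: one receive buffer per RQ entry is allocated and DMA-mapped, and a mid-loop failure walks back over whatever was already set up before freeing the array; free_proxy_bufs() is the same walk for the success path. A simplified userspace sketch of the pattern, with plain malloc/free standing in for the kmalloc and ib_dma_map_single() calls:

    #include <stdlib.h>

    struct buf {
        void *addr;
    };

    static int alloc_bufs(struct buf **out, int cnt, size_t size)
    {
        struct buf *bufs = calloc(cnt, sizeof(*bufs));
        int i;

        if (!bufs)
            return -1;
        for (i = 0; i < cnt; i++) {
            bufs[i].addr = malloc(size);   /* stands in for kmalloc + DMA map */
            if (!bufs[i].addr)
                goto err;
        }
        *out = bufs;
        return 0;

    err:
        while (--i >= 0)                   /* unwind only what succeeded */
            free(bufs[i].addr);
        free(bufs);
        return -1;
    }

    static void free_bufs(struct buf *bufs, int cnt)
    {
        int i;

        for (i = 0; i < cnt; i++)
            free(bufs[i].addr);
        free(bufs);
    }

    int main(void)
    {
        struct buf *bufs;

        if (alloc_bufs(&bufs, 16, 512) == 0)
            free_bufs(bufs, 16);
        return 0;
    }
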
541 struct mlx4_ib_qp *qp) in mlx4_ib_free_qp_counter() argument
543 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
544 mlx4_counter_free(dev->dev, qp->counter_index->index); in mlx4_ib_free_qp_counter()
545 list_del(&qp->counter_index->list); in mlx4_ib_free_qp_counter()
546 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
548 kfree(qp->counter_index); in mlx4_ib_free_qp_counter()
549 qp->counter_index = NULL; in mlx4_ib_free_qp_counter()
657 struct mlx4_ib_qp *qp) in create_qp_rss() argument
662 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_rss()
664 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); in create_qp_rss()
668 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_rss()
672 INIT_LIST_HEAD(&qp->gid_list); in create_qp_rss()
673 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_rss()
675 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_qp_rss()
676 qp->state = IB_QPS_RESET; in create_qp_rss()
679 qp->sq_no_prefetch = 1; in create_qp_rss()
680 qp->sq.wqe_cnt = 1; in create_qp_rss()
681 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
682 qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
683 qp->mtt = (to_mqp( in create_qp_rss()
686 qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); in create_qp_rss()
687 if (!qp->rss_ctx) { in create_qp_rss()
692 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); in create_qp_rss()
699 kfree(qp->rss_ctx); in create_qp_rss()
702 mlx4_qp_remove(dev->dev, &qp->mqp); in create_qp_rss()
703 mlx4_qp_free(dev->dev, &qp->mqp); in create_qp_rss()
710 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp_rss() argument
767 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
768 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
770 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); in _mlx4_ib_create_qp_rss()
774 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
784 struct mlx4_ib_qp *qp, int range_size, int *wqn) in mlx4_ib_alloc_wqn() argument
804 qp->mqp.usage); in mlx4_ib_alloc_wqn()
821 qp->wqn_range = range; in mlx4_ib_alloc_wqn()
834 struct mlx4_ib_qp *qp, bool dirty_release) in mlx4_ib_release_wqn() argument
841 range = qp->wqn_range; in mlx4_ib_release_wqn()
862 struct ib_udata *udata, struct mlx4_ib_qp *qp) in create_rq() argument
877 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_rq()
879 spin_lock_init(&qp->sq.lock); in create_rq()
880 spin_lock_init(&qp->rq.lock); in create_rq()
881 INIT_LIST_HEAD(&qp->gid_list); in create_rq()
882 INIT_LIST_HEAD(&qp->steering_rules); in create_rq()
884 qp->state = IB_QPS_RESET; in create_rq()
909 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_rq()
911 err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); in create_rq()
915 qp->sq_no_prefetch = 1; in create_rq()
916 qp->sq.wqe_cnt = 1; in create_rq()
917 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_rq()
918 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in create_rq()
919 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_rq()
921 qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); in create_rq()
922 if (IS_ERR(qp->umem)) { in create_rq()
923 err = PTR_ERR(qp->umem); in create_rq()
927 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_rq()
928 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_rq()
933 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_rq()
937 err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); in create_rq()
940 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_rq()
942 err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); in create_rq()
946 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_rq()
955 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_rq()
957 qp->mqp.event = mlx4_ib_wq_event; in create_rq()
965 list_add_tail(&qp->qps_list, &dev->qp_list); in create_rq()
970 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_rq()
972 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_rq()
979 mlx4_ib_release_wqn(context, qp, 0); in create_rq()
981 mlx4_ib_db_unmap_user(context, &qp->db); in create_rq()
984 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_rq()
986 ib_umem_release(qp->umem); in create_rq()
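
Both create_rq() and, later, create_qp_common() precompute qp->doorbell_qpn as swab32(qpn << 8): shifting the QPN up by eight bits and byte-swapping it once at creation time lets the hot post path write the cached value straight to the send doorbell (the writel_relaxed(qp->doorbell_qpn, ...) further down in _mlx4_ib_post_send()). A minimal illustration of that precomputation, assuming a little-endian CPU and a hypothetical standalone swab32():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swab32(uint32_t x)
    {
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) << 8)  |
               ((x & 0x00ff0000u) >> 8)  |
               ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
        uint32_t qpn = 0x1234;                  /* example QP number */
        uint32_t doorbell_qpn = swab32(qpn << 8);

        /* 0x1234 << 8 = 0x00123400, byte-swapped to 0x00341200 */
        printf("doorbell_qpn = 0x%08x\n", doorbell_qpn);
        return 0;
    }
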
993 struct mlx4_ib_qp *qp) in create_qp_common() argument
1050 qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); in create_qp_common()
1051 if (!qp->sqp) in create_qp_common()
1055 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
1057 spin_lock_init(&qp->sq.lock); in create_qp_common()
1058 spin_lock_init(&qp->rq.lock); in create_qp_common()
1059 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
1060 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
1062 qp->state = IB_QPS_RESET; in create_qp_common()
1064 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
1079 qp->inl_recv_sz = ucmd.inl_recv_sz; in create_qp_common()
1089 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_qp_common()
1093 qp_has_rq(init_attr), qp, qp->inl_recv_sz); in create_qp_common()
1097 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
1099 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
1103 qp->umem = in create_qp_common()
1104 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); in create_qp_common()
1105 if (IS_ERR(qp->umem)) { in create_qp_common()
1106 err = PTR_ERR(qp->umem); in create_qp_common()
1110 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_qp_common()
1111 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_qp_common()
1116 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
1121 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); in create_qp_common()
1125 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_common()
1128 qp_has_rq(init_attr), qp, 0); in create_qp_common()
1132 qp->sq_no_prefetch = 0; in create_qp_common()
1135 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
1140 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
1147 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
1152 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
1156 *qp->db.db = 0; in create_qp_common()
1159 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, in create_qp_common()
1160 &qp->buf)) { in create_qp_common()
1165 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
1166 &qp->mtt); in create_qp_common()
1170 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
1174 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, in create_qp_common()
1176 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, in create_qp_common()
1178 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
1182 qp->mqp.usage = MLX4_RES_USAGE_DRIVER; in create_qp_common()
1186 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
1188 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
1203 qp->mqp.usage); in create_qp_common()
1205 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1209 &qpn, 0, qp->mqp.usage); in create_qp_common()
1215 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
1217 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
1222 qp->mqp.qpn |= (1 << 23); in create_qp_common()
1229 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1231 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
1239 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
1244 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
1246 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
1254 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1260 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
1261 free_proxy_bufs(pd->device, qp); in create_qp_common()
1265 mlx4_ib_db_unmap_user(context, &qp->db); in create_qp_common()
1267 kvfree(qp->sq.wrid); in create_qp_common()
1268 kvfree(qp->rq.wrid); in create_qp_common()
1272 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
1275 if (!qp->umem) in create_qp_common()
1276 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
1277 ib_umem_release(qp->umem); in create_qp_common()
1281 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
1284 kfree(qp->sqp); in create_qp_common()
1332 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
1336 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
1342 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
1344 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1345 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1347 return to_mpd(qp->ibqp.pd); in get_pd()
1350 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, in get_cqs() argument
1353 switch (qp->ibqp.qp_type) { in get_cqs()
1355 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
1359 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1363 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : in get_cqs()
1364 to_mcq(qp->ibwq.cq); in get_cqs()
1365 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1371 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in destroy_qp_rss() argument
1373 if (qp->state != IB_QPS_RESET) { in destroy_qp_rss()
1376 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); in destroy_qp_rss()
1378 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; in destroy_qp_rss()
1388 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_rss()
1389 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_rss()
1391 qp->mqp.qpn); in destroy_qp_rss()
1394 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_rss()
1395 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_rss()
1396 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_rss()
1397 del_gid_entries(qp); in destroy_qp_rss()
1400 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
1407 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1408 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
1409 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1411 qp->mqp.qpn); in destroy_qp_common()
1412 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
1413 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
1414 qp->pri.smac = 0; in destroy_qp_common()
1415 qp->pri.smac_port = 0; in destroy_qp_common()
1417 if (qp->alt.smac) { in destroy_qp_common()
1418 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
1419 qp->alt.smac = 0; in destroy_qp_common()
1421 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1422 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1423 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1424 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1425 qp->pri.update_vid = 0; in destroy_qp_common()
1427 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1428 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1429 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1430 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1431 qp->alt.update_vid = 0; in destroy_qp_common()
1435 get_cqs(qp, src, &send_cq, &recv_cq); in destroy_qp_common()
1441 list_del(&qp->qps_list); in destroy_qp_common()
1442 list_del(&qp->cq_send_list); in destroy_qp_common()
1443 list_del(&qp->cq_recv_list); in destroy_qp_common()
1445 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1446 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1448 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1451 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1456 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1458 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1459 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1460 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1467 qp, 1); in destroy_qp_common()
1469 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1472 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1475 if (qp->rq.wqe_cnt) { in destroy_qp_common()
1482 mlx4_ib_db_unmap_user(mcontext, &qp->db); in destroy_qp_common()
1485 kvfree(qp->sq.wrid); in destroy_qp_common()
1486 kvfree(qp->rq.wrid); in destroy_qp_common()
1487 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1489 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1490 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1491 if (qp->rq.wqe_cnt) in destroy_qp_common()
1492 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1494 ib_umem_release(qp->umem); in destroy_qp_common()
1496 del_gid_entries(qp); in destroy_qp_common()
1516 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp() argument
1525 return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); in _mlx4_ib_create_qp()
1574 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1575 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1576 err = create_qp_common(pd, init_attr, udata, 0, qp); in _mlx4_ib_create_qp()
1580 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp()
1581 qp->xrcdn = xrcdn; in _mlx4_ib_create_qp()
1599 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1600 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1601 err = create_qp_common(pd, init_attr, udata, sqpn, qp); in _mlx4_ib_create_qp()
1608 rdma_restrack_no_track(&qp->ibqp.res); in _mlx4_ib_create_qp()
1610 qp->port = init_attr->port_num; in _mlx4_ib_create_qp()
1611 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : in _mlx4_ib_create_qp()
1627 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_create_qp() local
1631 mutex_init(&qp->mutex); in mlx4_ib_create_qp()
1632 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); in mlx4_ib_create_qp()
1638 struct mlx4_ib_sqp *sqp = qp->sqp; in mlx4_ib_create_qp()
1660 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in _mlx4_ib_destroy_qp() argument
1662 struct mlx4_ib_dev *dev = to_mdev(qp->device); in _mlx4_ib_destroy_qp()
1663 struct mlx4_ib_qp *mqp = to_mqp(qp); in _mlx4_ib_destroy_qp()
1678 if (qp->rwq_ind_tbl) { in _mlx4_ib_destroy_qp()
1688 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx4_ib_destroy_qp() argument
1690 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1699 return _mlx4_ib_destroy_qp(qp, udata); in mlx4_ib_destroy_qp()
1726 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1736 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1741 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1890 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1896 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1903 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1908 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1914 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1918 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1919 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1921 ge->port = qp->port; in update_mcg_macs()
1927 struct mlx4_ib_qp *qp, in handle_eth_ud_smac_index() argument
1933 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1935 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1936 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1937 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1939 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1940 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1941 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1950 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in create_qp_lb_counter() argument
1956 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != in create_qp_lb_counter()
1958 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || in create_qp_lb_counter()
1974 qp->counter_index = new_counter_index; in create_qp_lb_counter()
1976 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1978 &dev->counters_table[qp->port - 1].counters_list); in create_qp_lb_counter()
1979 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
2088 struct mlx4_ib_qp *qp) in fill_qp_rss_context() argument
2095 rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); in fill_qp_rss_context()
2097 cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); in fill_qp_rss_context()
2098 if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) in fill_qp_rss_context()
2100 rss_context->flags = qp->rss_ctx->flags; in fill_qp_rss_context()
2104 memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, in fill_qp_rss_context()
2119 struct mlx4_ib_qp *qp; in __mlx4_ib_modify_qp() local
2138 qp = to_mqp((struct ib_qp *)ibwq); in __mlx4_ib_modify_qp()
2148 qp = to_mqp(ibqp); in __mlx4_ib_modify_qp()
2150 pd = get_pd(qp); in __mlx4_ib_modify_qp()
2155 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2164 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
2183 if (qp->inl_recv_sz) in __mlx4_ib_modify_qp()
2186 if (qp->flags & MLX4_IB_QP_SCATTER_FCS) in __mlx4_ib_modify_qp()
2194 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
2210 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2211 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2212 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2215 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
2216 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2217 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2219 if (new_state == IB_QPS_RESET && qp->counter_index) in __mlx4_ib_modify_qp()
2220 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2223 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
2224 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
2248 err = create_qp_lb_counter(dev, qp); in __mlx4_ib_modify_qp()
2253 dev->counters_table[qp->port - 1].default_counter; in __mlx4_ib_modify_qp()
2254 if (qp->counter_index) in __mlx4_ib_modify_qp()
2255 counter_index = qp->counter_index->index; in __mlx4_ib_modify_qp()
2260 if (qp->counter_index) { in __mlx4_ib_modify_qp()
2270 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
2271 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
2276 enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? in __mlx4_ib_modify_qp()
2285 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2293 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in __mlx4_ib_modify_qp()
2308 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
2342 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
2356 get_cqs(qp, src_type, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
2396 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
2412 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2417 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
2435 if (qp->rq.wqe_cnt && in __mlx4_ib_modify_qp()
2438 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
2444 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
2445 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
2446 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2449 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
2452 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2456 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2458 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
2459 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
2462 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
2463 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
2464 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
2465 err = handle_eth_ud_smac_index(dev, qp, context); in __mlx4_ib_modify_qp()
2470 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
2471 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
2488 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2519 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
2520 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2523 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
2524 stamp_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2531 fill_qp_rss_context(context, qp); in __mlx4_ib_modify_qp()
2535 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
2537 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
2541 qp->state = new_state; in __mlx4_ib_modify_qp()
2544 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
2546 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
2548 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
2549 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
2552 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
2554 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
2555 store_sqp_attrs(qp->sqp, attr, attr_mask); in __mlx4_ib_modify_qp()
2561 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
2563 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
2565 qp->port); in __mlx4_ib_modify_qp()
2569 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
2578 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
2581 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
2583 qp->rq.head = 0; in __mlx4_ib_modify_qp()
2584 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
2585 qp->sq.head = 0; in __mlx4_ib_modify_qp()
2586 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
2587 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
2588 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2589 *qp->db.db = 0; in __mlx4_ib_modify_qp()
2591 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
2592 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2594 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
2595 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2596 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
2597 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
2599 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
2600 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2601 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
2603 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
2604 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
2605 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2606 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2607 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2610 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
2611 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
2612 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2613 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2614 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2618 if (err && qp->counter_index) in __mlx4_ib_modify_qp()
2619 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2621 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2623 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
2624 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
2626 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
2628 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
2629 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2630 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
2631 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
2632 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
2634 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
2635 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2636 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2638 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
2640 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
2642 if (qp->alt.smac) in __mlx4_ib_modify_qp()
2643 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2644 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
2645 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
2646 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
2648 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
2649 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2650 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2653 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
2655 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2656 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
2657 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
2659 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
2660 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
2661 qp->pri.vid); in __mlx4_ib_modify_qp()
2662 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
2663 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
2664 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
2666 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2667 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2670 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
2672 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2673 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
2674 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
2676 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
2677 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
2678 qp->alt.vid); in __mlx4_ib_modify_qp()
2679 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
2680 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
2681 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
2683 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2684 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2699 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_modify_qp() local
2702 mutex_lock(&qp->mutex); in _mlx4_ib_modify_qp()
2704 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in _mlx4_ib_modify_qp()
2769 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _mlx4_ib_modify_qp()
2819 mutex_unlock(&qp->mutex); in _mlx4_ib_modify_qp()
2860 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, in build_sriov_qp0_header() argument
2864 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); in build_sriov_qp0_header()
2865 struct mlx4_ib_sqp *sqp = qp->sqp; in build_sriov_qp0_header()
2866 struct ib_device *ib_dev = qp->ibqp.device; in build_sriov_qp0_header()
2888 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2893 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2910 err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); in build_sriov_qp0_header()
2914 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2918 cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); in build_sriov_qp0_header()
2922 if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2925 if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2929 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); in build_sriov_qp0_header()
3013 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
3016 struct mlx4_ib_sqp *sqp = qp->sqp; in build_mlx_header()
3017 struct ib_device *ib_dev = qp->ibqp.device; in build_mlx_header()
3041 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
3055 err = fill_gid_by_hw_index(ibdev, qp->port, in build_mlx_header()
3107 .demux[qp->port - 1] in build_mlx_header()
3112 ->sriov.demux[qp->port - 1] in build_mlx_header()
3146 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
3195 !qp->ibqp.qp_num ? in build_mlx_header()
3199 qp->port); in build_mlx_header()
3200 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) in build_mlx_header()
3206 if (!qp->ibqp.qp_num) in build_mlx_header()
3207 err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, in build_mlx_header()
3210 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
3220 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
3481 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, in build_lso_seg() argument
3489 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
3490 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
3525 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_send() local
3542 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { in _mlx4_ib_post_send()
3543 struct mlx4_ib_sqp *sqp = qp->sqp; in _mlx4_ib_post_send()
3550 if (!fill_gid_by_hw_index(mdev, qp->port, in _mlx4_ib_post_send()
3553 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? in _mlx4_ib_post_send()
3554 to_mqp(sqp->roce_v2_gsi) : qp; in _mlx4_ib_post_send()
3561 spin_lock_irqsave(&qp->sq.lock, flags); in _mlx4_ib_post_send()
3570 ind = qp->sq_next_wqe; in _mlx4_ib_post_send()
3576 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3582 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx4_ib_post_send()
3588 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in _mlx4_ib_post_send()
3589 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3599 qp->sq_signal_bits; in _mlx4_ib_post_send()
3606 switch (qp->mlx4_ib_qp_type) { in _mlx4_ib_post_send()
3670 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3694 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in _mlx4_ib_post_send()
3707 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3731 qp->mlx4_ib_qp_type); in _mlx4_ib_post_send()
3741 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); in _mlx4_ib_post_send()
3766 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in _mlx4_ib_post_send()
3767 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in _mlx4_ib_post_send()
3768 qp->mlx4_ib_qp_type & in _mlx4_ib_post_send()
3802 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in _mlx4_ib_post_send()
3810 stamp_send_wqe(qp, ind + qp->sq_spare_wqes); in _mlx4_ib_post_send()
3816 qp->sq.head += nreq; in _mlx4_ib_post_send()
3824 writel_relaxed(qp->doorbell_qpn, in _mlx4_ib_post_send()
3827 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); in _mlx4_ib_post_send()
3829 qp->sq_next_wqe = ind; in _mlx4_ib_post_send()
3832 spin_unlock_irqrestore(&qp->sq.lock, flags); in _mlx4_ib_post_send()
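
The post-send loop keeps a free-running producer index (ind, seeded from qp->sq_next_wqe): the WQE slot and the wrid slot come from masking with wqe_cnt - 1, while the untruncated index supplies the bit that flips on every wrap (ind & qp->sq.wqe_cnt selecting the 1 << 31 ownership flag in the control segment). A small sketch of that ring bookkeeping, with a hypothetical structure and no hardware interaction:

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_CNT 8          /* must be a power of two */

    struct sq_ring {
        uint64_t wrid[WQE_CNT];
        uint32_t head;          /* free-running producer index */
    };

    static void post_one(struct sq_ring *sq, uint64_t wr_id)
    {
        uint32_t ind   = sq->head;
        uint32_t slot  = ind & (WQE_CNT - 1);               /* wraps the ring  */
        uint32_t owner = (ind & WQE_CNT) ? (1u << 31) : 0;  /* flips each wrap */

        sq->wrid[slot] = wr_id;
        printf("ind=%u slot=%u owner_bit=%s\n", ind, slot, owner ? "1" : "0");
        sq->head = ind + 1;
    }

    int main(void)
    {
        struct sq_ring sq = { .head = 0 };

        for (int i = 0; i < 12; i++)            /* crosses one wrap at i == 8 */
            post_one(&sq, 0x1000 + i);
        return 0;
    }
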
3846 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_recv() local
3856 max_gs = qp->rq.max_gs; in _mlx4_ib_post_recv()
3857 spin_lock_irqsave(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
3867 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3870 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in _mlx4_ib_post_recv()
3876 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in _mlx4_ib_post_recv()
3882 scat = get_recv_wqe(qp, ind); in _mlx4_ib_post_recv()
3884 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in _mlx4_ib_post_recv()
3887 qp->sqp_proxy_rcv[ind].map, in _mlx4_ib_post_recv()
3894 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in _mlx4_ib_post_recv()
3908 qp->rq.wrid[ind] = wr->wr_id; in _mlx4_ib_post_recv()
3910 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3915 qp->rq.head += nreq; in _mlx4_ib_post_recv()
3923 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in _mlx4_ib_post_recv()
3926 spin_unlock_irqrestore(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
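
_mlx4_ib_post_recv() advances rq.head by the number of requests posted and then publishes only the low 16 bits of that counter to the doorbell record (*qp->db.db = cpu_to_be32(head & 0xffff)), which is how new receive WQEs are made visible. A tiny sketch of that counter update, using ordinary memory and a hypothetical byte-swap helper in place of the mapped doorbell record:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cpu_to_be32_(uint32_t x)    /* little-endian host assumed */
    {
        return ((x & 0xffu) << 24) | ((x & 0xff00u) << 8) |
               ((x >> 8) & 0xff00u) | ((x >> 24) & 0xffu);
    }

    int main(void)
    {
        uint32_t rq_head = 0x0001fffe;   /* free-running receive head counter */
        uint32_t nreq = 4;               /* WRs posted in this call */
        volatile uint32_t db = 0;        /* stands in for the doorbell record */

        rq_head += nreq;
        db = cpu_to_be32_(rq_head & 0xffff);   /* only the low 16 bits are exposed */

        printf("head=0x%08x db=0x%08x\n", rq_head, db);
        return 0;
    }
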
4014 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
4022 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
4024 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
4029 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
4037 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
4038 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
4049 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || in mlx4_ib_query_qp()
4050 qp->ibqp.qp_type == IB_QPT_XRC_INI || in mlx4_ib_query_qp()
4051 qp->ibqp.qp_type == IB_QPT_XRC_TGT) { in mlx4_ib_query_qp()
4061 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
4081 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
4082 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
4085 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
4086 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
4101 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
4104 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
4107 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
4111 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
4115 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()
4125 struct mlx4_ib_qp *qp; in mlx4_ib_create_wq() local
4161 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx4_ib_create_wq()
4162 if (!qp) in mlx4_ib_create_wq()
4165 mutex_init(&qp->mutex); in mlx4_ib_create_wq()
4166 qp->pri.vid = 0xFFFF; in mlx4_ib_create_wq()
4167 qp->alt.vid = 0xFFFF; in mlx4_ib_create_wq()
4179 err = create_rq(pd, &ib_qp_init_attr, udata, qp); in mlx4_ib_create_wq()
4181 kfree(qp); in mlx4_ib_create_wq()
4185 qp->ibwq.event_handler = init_attr->event_handler; in mlx4_ib_create_wq()
4186 qp->ibwq.wq_num = qp->mqp.qpn; in mlx4_ib_create_wq()
4187 qp->ibwq.state = IB_WQS_RESET; in mlx4_ib_create_wq()
4189 return &qp->ibwq; in mlx4_ib_create_wq()
4207 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in _mlx4_ib_modify_wq() local
4216 qp_cur_state = qp->state; in _mlx4_ib_modify_wq()
4225 attr.port_num = qp->port; in _mlx4_ib_modify_wq()
4255 qp->state = qp_new_state; in _mlx4_ib_modify_wq()
4263 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_modify_wq() local
4300 mutex_lock(&qp->mutex); in mlx4_ib_modify_wq()
4305 if (qp->rss_usecnt) in mlx4_ib_modify_wq()
4311 mutex_unlock(&qp->mutex); in mlx4_ib_modify_wq()
4319 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_destroy_wq() local
4321 if (qp->counter_index) in mlx4_ib_destroy_wq()
4322 mlx4_ib_free_qp_counter(dev, qp); in mlx4_ib_destroy_wq()
4324 destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); in mlx4_ib_destroy_wq()
4326 kfree(qp); in mlx4_ib_destroy_wq()
4446 void mlx4_ib_drain_sq(struct ib_qp *qp) in mlx4_ib_drain_sq() argument
4448 struct ib_cq *cq = qp->send_cq; in mlx4_ib_drain_sq()
4460 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_sq()
4463 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_sq()
4472 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); in mlx4_ib_drain_sq()
4481 void mlx4_ib_drain_rq(struct ib_qp *qp) in mlx4_ib_drain_rq() argument
4483 struct ib_cq *cq = qp->recv_cq; in mlx4_ib_drain_rq()
4489 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_rq()
4492 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_rq()
4502 ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); in mlx4_ib_drain_rq()