xref: /openbmc/linux/drivers/infiniband/hw/mlx5/srq.c (revision e0deb0e9)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "srq.h"

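/* Return a pointer to WQE number @n in the SRQ's fragmented WQE buffer. */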
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}

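/*
 * Translate an SRQ-related hardware event into the corresponding IB event
 * (limit reached or catastrophic error) and deliver it to the consumer's
 * event handler, if one is registered.
 */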
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

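/*
 * Userspace SRQ creation path: copy and validate the user command, pin the
 * user-provided WQE buffer with ib_umem_get(), and map the doorbell record
 * supplied by userspace.
 */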
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	size_t ucmdlen;
	int err;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type != IB_SRQT_BASIC) {
		err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}
	in->umem = srq->umem;

	err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_umem;
	}

	in->uid = (in->type != IB_SRQT_XRC) ?  to_mpd(pd)->uid : 0;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = uidx;

	return 0;

err_umem:
	ib_umem_release(srq->umem);

	return err;
}

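/*
 * Kernel SRQ creation path: allocate the doorbell record and the fragmented
 * WQE buffer, chain all WQEs into a free list through their next_wqe_index
 * fields, and build the page list (PAS) that will be handed to firmware.
 */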
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
				     dev->mdev->priv.numa_node)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
		      &srq->fbc);

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_frag_array(&srq->buf, in->pas);

	srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = 0;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_frag_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

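/* Undo create_srq_user(): unmap the user doorbell and release the umem. */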
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			     struct ib_udata *udata)
{
	mlx5_ib_db_unmap_user(
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext),
		&srq->db);
	ib_umem_release(srq->umem);
}

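/* Undo create_srq_kernel(): free the wrid array, WQE buffer and doorbell. */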
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kvfree(srq->wrid);
	mlx5_frag_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}

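/*
 * Create an SRQ (basic, XRC or tag-matching).  WQE count and size are
 * validated against device capabilities and rounded up to powers of two,
 * the WQE buffer is set up through the user or kernel path, and the SRQ is
 * then created in firmware via mlx5_cmd_create_srq().
 */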
int mlx5_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx5_ib_srq *srq = to_msrq(ib_srq);
	size_t desc_size;
	size_t buf_size;
	int err;
	struct mlx5_srq_attr in = {};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	__u32 max_sge_sz =  MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
			    sizeof(struct mlx5_wqe_data_seg);

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC &&
	    init_attr->srq_type != IB_SRQT_TM)
		return -EOPNOTSUPP;

	/* Sanity check SRQ and sge size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes ||
	    init_attr->attr.max_sge > max_sge_sz) {
		mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
			    init_attr->attr.max_wr, max_srq_wqes,
			    init_attr->attr.max_sge, max_sge_sz);
		return -EINVAL;
	}

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
		return -EINVAL;

	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(size_t, 32, desc_size);
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
		return -EINVAL;

	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	if (buf_size < desc_size)
		return -EINVAL;

	in.type = init_attr->srq_type;

	if (udata)
		err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     udata ? "user" : "kernel", err);
		return err;
	}

	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

	if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
	else
		in.xrcd = dev->devr.xrcdn0;

	if (init_attr->srq_type == IB_SRQT_TM) {
		in.tm_log_list_size =
			ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
		if (in.tm_log_list_size >
		    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
			mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
			err = -EINVAL;
			goto err_usr_kern_srq;
		}
		in.flags |= MLX5_SRQ_FLAG_RNDV;
	}

	if (ib_srq_has_cq(init_attr->srq_type))
		in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
	else
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

	in.pd = to_mpd(ib_srq->pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata) {
		struct mlx5_ib_create_srq_resp resp = {
			.srqn = srq->msrq.srqn,
		};

		if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
				     sizeof(resp)))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}
	}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_core:
	mlx5_cmd_destroy_srq(dev, &srq->msrq);

err_usr_kern_srq:
	if (udata)
		destroy_srq_user(ib_srq->pd, srq, udata);
	else
		destroy_srq_kernel(dev, srq);

	return err;
}

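/*
 * Only arming the SRQ limit (IB_SRQ_LIMIT) is supported; resizing via
 * IB_SRQ_MAX_WR is rejected with -EINVAL.
 */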
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

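/*
 * Query the SRQ from firmware and report the current limit (lwm) along
 * with the maximum WR and SGE counts.
 */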
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

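/* Destroy the SRQ in firmware, then release the user or kernel resources. */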
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);
	int ret;

	ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
	if (ret)
		return ret;

	if (udata)
		destroy_srq_user(srq->pd, msrq, udata);
	else
		destroy_srq_kernel(dev, msrq);
	return 0;
}

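/*
 * Return a consumed WQE to the SRQ free list by linking it after the
 * current tail.
 */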
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

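/*
 * Post a chain of receive work requests to the SRQ: for each WR, take a WQE
 * from the free list, fill its scatter entries (terminating the list with
 * the terminate-scatter-list mkey when there is room), and finally update
 * the doorbell record once so the HCA sees the newly posted WQEs.
 */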
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}