/*
 * Copyright (c) 2016-2017 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

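/*
 * All verbs in this file follow the same device command pattern, visible
 * in the functions below: zero a request taken from union pvrdma_cmd_req,
 * set hdr.cmd to the PVRDMA_CMD_* opcode, post it with pvrdma_cmd_post(),
 * and, when a response is expected, read the matching member of
 * union pvrdma_cmd_resp. A minimal sketch of that shape, using the query
 * verb defined below:
 *
 *	union pvrdma_cmd_req req;
 *	union pvrdma_cmd_resp rsp;
 *	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
 *
 *	memset(cmd, 0, sizeof(*cmd));
 *	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
 */
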
/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}
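
/*
 * From user space this path is typically reached through the standard
 * libibverbs query verb. The sketch below is illustrative only (it is
 * user-space code, not part of this driver) and assumes a previously
 * created 'srq':
 *
 *	struct ibv_srq_attr attr;
 *
 *	if (!ibv_query_srq(srq, &attr))
 *		printf("max_wr %u, srq_limit %u\n",
 *		       attr.max_wr, attr.srq_limit);
 */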

/**
 * pvrdma_create_srq - create shared receive queue
 * @ibsrq: the IB shared receive queue
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return -EINVAL;
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return -ENOMEM;

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq, udata);
		return -EINVAL;
	}

	return 0;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	atomic_dec(&dev->num_srqs);

	return ret;
}
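
/*
 * Because only user-space clients are supported (see the !udata check
 * above), this path is normally exercised through libibverbs. An
 * illustrative user-space sketch (not part of this driver) pairing
 * creation with teardown, with example sizes chosen arbitrarily:
 *
 *	struct ibv_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 64, .max_sge = 1, .srq_limit = 0 },
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init_attr);
 *
 *	if (srq) {
 *		... post receives with ibv_post_srq_recv(), attach QPs ...
 *		ibv_destroy_srq(srq);
 *	}
 */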

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	atomic_dec(&dev->num_srqs);
}
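
/*
 * The refcount/completion pair above is the usual "drain before free"
 * pattern: pvrdma_create_srq() sets the initial reference, other users of
 * the SRQ (e.g. the driver's event handling) are expected to hold a
 * temporary reference while they touch it, and the last put signals
 * srq->free so the teardown above can release the memory safely. A
 * generic sketch of the pattern (illustrative only, with a hypothetical
 * 'obj'):
 *
 *	if (refcount_inc_not_zero(&obj->refcnt)) {	// user side
 *		... use obj ...
 *		if (refcount_dec_and_test(&obj->refcnt))
 *			complete(&obj->free);
 *	}
 *
 *	if (refcount_dec_and_test(&obj->refcnt))	// teardown side drops
 *		complete(&obj->free);			// the initial reference
 *	wait_for_completion(&obj->free);		// and waits for users
 */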

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 * @udata: user data or null for kernel object
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);
	return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support SRQ limit. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);

		return -EINVAL;
	}

	return ret;
}
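
/*
 * Note that only IB_SRQ_LIMIT is accepted above, i.e. the verb is used to
 * (re)arm the SRQ limit event rather than to resize the queue. An
 * illustrative user-space sketch (not part of this driver) that arms the
 * limit at 16 outstanding receives:
 *
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *
 *	if (ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT))
 *		... handle the error ...
 */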