/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
#include "qedr_iw_cm.h"

#define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ	(4)
#define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

enum {
	QEDR_USER_MMAP_IO_WC = 0,
	QEDR_USER_MMAP_PHYS_PAGE,
};

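/* Copy a response to user space, truncated to the output buffer size the
 * user provided (udata->outlen).
 */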
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

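/* RoCE exposes a single default P-Key; only indices below the table length
 * are valid.
 */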
int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qedr_device_attr *qattr = &dev->attr;
	struct qedr_srq *srq = get_qedr_srq(ibsrq);

	srq_attr->srq_limit = srq->srq_limit;
	srq_attr->max_wr = qattr->max_srq_wr;
	srq_attr->max_sge = qattr->max_sge;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		attr->device_cap_flags |= IB_DEVICE_XRC;
	attr->max_send_sge = qattr->max_sge;
	attr->max_recv_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = qattr->max_pkey;
	attr->max_ah = qattr->max_ah;

	return 0;
}

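/* Map an Ethernet link speed in Mbps to the closest IB speed/width pair
 * reported through query_port.
 */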
static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = IB_SPEED_HDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 100000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u32 port,
		    struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr being zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->ip_gids = true;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
		attr->gid_tbl_len = 1;
	} else {
		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int rc;
	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
	struct qedr_alloc_ucontext_resp uresp = {};
	struct qedr_alloc_ucontext_req ureq = {};
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;
	struct qedr_user_mmap_entry *entry;

	if (!udata)
		return -EFAULT;

	if (udata->inlen) {
		rc = ib_copy_from_udata(&ureq, udata,
					min(sizeof(ureq), udata->inlen));
		if (rc) {
			DP_ERR(dev, "Problem copying data from user space\n");
			return -EFAULT;
		}
		ctx->edpm_mode = !!(ureq.context_flags &
				    QEDR_ALLOC_UCTX_EDPM_MODE);
		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
	}

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
		return rc;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		rc = -ENOMEM;
		goto err;
	}

	entry->io_address = ctx->dpi_phys_addr;
	entry->length = ctx->dpi_size;
	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
	entry->dpi = ctx->dpi;
	entry->dev = dev;
	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
					 ctx->dpi_size);
	if (rc) {
		kfree(entry);
		goto err;
	}
	ctx->db_mmap_entry = &entry->rdma_entry;

	if (!dev->user_dpm_enabled)
		uresp.dpm_flags = 0;
	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
	else
		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
				  QEDR_DPM_TYPE_ROCE_LEGACY |
				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;

	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
	}

	uresp.wids_enabled = 1;
	uresp.wid_count = oparams.wid_count;
	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return 0;

err:
	if (!ctx->db_mmap_entry)
		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
	else
		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);

	return rc;
}

void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);

	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}

void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
	struct qedr_dev *dev = entry->dev;

	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
		free_page((unsigned long)entry->address);
	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);

	kfree(entry);
}

int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	struct ib_device *dev = ucontext->device;
	size_t length = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct qedr_user_mmap_entry *entry;
	int rc = 0;
	u64 pfn;

	ibdev_dbg(dev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}
	entry = get_qedr_mmap_entry(rdma_entry);
	ibdev_dbg(dev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->io_address, length, entry->mmap_flag);

	switch (entry->mmap_flag) {
	case QEDR_USER_MMAP_IO_WC:
		pfn = entry->io_address >> PAGE_SHIFT;
		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
				       pgprot_writecombine(vma->vm_page_prot),
				       rdma_entry);
		break;
	case QEDR_USER_MMAP_PHYS_PAGE:
		rc = vm_insert_page(vma, vma->vm_start,
				    virt_to_page(entry->address));
		break;
	default:
		rc = -EINVAL;
	}

	if (rc)
		ibdev_dbg(dev,
			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			  entry->io_address, length, entry->mmap_flag, rc);

	rdma_user_mmap_entry_put(rdma_entry);
	return rc;
}

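/* Allocate a protection domain ID from the qed core; for user consumers the
 * new pd_id is also returned through the uresp.
 */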
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 udata ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return -EINVAL;
	}

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		return rc;

	pd->pd_id = pd_id;

	if (udata) {
		struct qedr_alloc_pd_uresp uresp = {
			.pd_id = pd_id,
		};
		struct qedr_ucontext *context = rdma_udata_to_drv_context(
			udata, struct qedr_ucontext, ibucontext);

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			return rc;
		}

		pd->uctx = context;
		pd->uctx->pd = pd;
	}

	return 0;
}

int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
	return 0;
}

int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);

	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
}

int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;

	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

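/* Allocate the PBL pages as DMA-coherent memory. The first page is filled
 * with the physical addresses of the remaining pages, which is the layout a
 * two-layer table expects.
 */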
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
					flags);
		if (!va)
			goto err;

		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 ( points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

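/* Walk the umem in pg_shift-sized DMA blocks and write each block address
 * into the PBL entries (PBEs), moving to the next PBL page when one fills up.
 */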
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct ib_block_iter biter;
	struct regpair *pbe;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
		u64 pg_addr = rdma_block_iter_dma_address(&biter);

		pbe->lo = cpu_to_le32(pg_addr);
		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

		pbe_cnt++;
		total_num_pbes++;
		pbe++;

		if (total_num_pbes == pbl_info->num_pbes)
			return;

		/* If the given pbl is full storing the pbes, move to next pbl.
		 */
		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct regpair *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

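/* Register a doorbell address/data pair with the qed doorbell-recovery
 * mechanism. Old user libraries pass no db_data, in which case recovery is
 * simply skipped.
 */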
static int qedr_db_recovery_add(struct qedr_dev *dev,
				void __iomem *db_addr,
				void *db_data,
				enum qed_db_rec_width db_width,
				enum qed_db_rec_space db_space)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return 0;
	}

	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
						 db_width, db_space);
}

static void qedr_db_recovery_del(struct qedr_dev *dev,
				 void __iomem *db_addr,
				 void *db_data)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return;
	}

	/* Ignore return code as there is not much we can do about it. Error
	 * log will be printed inside.
	 */
	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata,
			      u32 db_offset)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = db_offset;
	uresp.icid = cq->icid;
	if (cq->q.db_mmap_entry)
		uresp.db_rec_addr =
			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

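/* Advance the latest-CQE pointer; flip the expected toggle bit each time the
 * chain wraps past its last element.
 */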
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

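/* Allocate a zeroed page used for doorbell recovery of a user queue and
 * expose it to user space through an rdma_user_mmap entry.
 */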
static int qedr_init_user_db_rec(struct ib_udata *udata,
				 struct qedr_dev *dev, struct qedr_userq *q,
				 bool requires_db_rec)
{
	struct qedr_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
					  ibucontext);
	struct qedr_user_mmap_entry *entry;
	int rc;

	/* Aborting for non doorbell userqueue (SRQ) or non-supporting lib */
	if (requires_db_rec == 0 || !uctx->db_rec)
		return 0;

	/* Allocate a page for doorbell recovery, add to mmap */
	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
	if (!q->db_rec_data) {
		DP_ERR(dev, "get_zeroed_page failed\n");
		return -ENOMEM;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto err_free_db_data;

	entry->address = q->db_rec_data;
	entry->length = PAGE_SIZE;
	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
					 &entry->rdma_entry,
					 PAGE_SIZE);
	if (rc)
		goto err_free_entry;

	q->db_mmap_entry = &entry->rdma_entry;

	return 0;

err_free_entry:
	kfree(entry);

err_free_db_data:
	free_page((unsigned long)q->db_rec_data);
	q->db_rec_data = NULL;
	return -ENOMEM;
}

static inline int qedr_init_user_queue(struct ib_udata *udata,
				       struct qedr_dev *dev,
				       struct qedr_userq *q, u64 buf_addr,
				       size_t buf_len, bool requires_db_rec,
				       int access,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	/* mmap the user address used to store doorbell data for recovery */
	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

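/* Ring the CQ doorbell with the given consumer index and aggregation flags. */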
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

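/* Create a CQ. User CQs are backed by a user buffer mapped through a PBL;
 * kernel CQs are backed by a qed chain allocated here.
 */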
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct qedr_ucontext, ibucontext);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qed_chain_init_params chain_params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
		.elem_size	= sizeof(union rdma_cqe),
	};
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq = {};
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int chain_entries;
	u32 db_offset;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return -EINVAL;
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
	chain_params.num_elems = chain_entries;

	/* calc db offset. user will add DPI base, kernel will add db addr */
	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
							 udata->inlen))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
					  1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
		cq->q.db_addr = ctx->dpi_addr + db_offset;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
						   &chain_params);
		if (rc)
			goto err0;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err1;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
		if (rc)
			goto err2;

		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
					  &cq->q.db_rec_data->db_data,
					  DB_REC_WIDTH_64B,
					  DB_REC_USER);
		if (rc)
			goto err2;

	} else {
		/* Generate doorbell address. */
		cq->db.data.icid = cq->icid;
		cq->db_addr = dev->db_addr + db_offset;
		cq->db.data.params = DB_AGG_CMD_MAX <<
				     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
		if (rc)
			goto err2;
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return 0;

err2:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err1:
	if (udata) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
		if (cq->q.db_mmap_entry)
			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
	} else {
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
err0:
	return -EINVAL;
}

#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSIs CQs are handled by driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
		return 0;
	}

	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (udata) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);

		if (cq->q.db_rec_data) {
			qedr_db_recovery_del(dev, cq->q.db_addr,
					     &cq->q.db_rec_data->db_data);
			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
		}
	} else {
		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
	}

10944dd72636SAmrani, Ram /* We don't want the IRQ handler to handle a non-existing CQ so we
10954dd72636SAmrani, Ram * wait until all CNQ interrupts, if any, are received. This will always
10964dd72636SAmrani, Ram * happen and will always happen very fast. If not, then a serious error
10974dd72636SAmrani, Ram * has occurred. That is why we can use a long delay.
10984dd72636SAmrani, Ram * We spin for a short time so we don't lose time on context switching
10994dd72636SAmrani, Ram * in case all the completions are handled in that span. Otherwise
11004dd72636SAmrani, Ram * we sleep for a while and check again. Since the CNQ may be
11014dd72636SAmrani, Ram * associated with (only) the current CPU we use msleep to allow the
11024dd72636SAmrani, Ram * current CPU to be freed.
11034dd72636SAmrani, Ram * The CNQ notification is increased in qedr_irq_handler().
11044dd72636SAmrani, Ram */
11054dd72636SAmrani, Ram iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
11064dd72636SAmrani, Ram while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
11074dd72636SAmrani, Ram udelay(QEDR_DESTROY_CQ_ITER_DURATION);
11084dd72636SAmrani, Ram iter--;
11094dd72636SAmrani, Ram }
11104dd72636SAmrani, Ram
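	/* Busy-waiting did not drain all pending notifications; keep polling,
	 * but sleep between checks so the CPU servicing the CNQ can run.
	 */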
11114dd72636SAmrani, Ram iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
11124dd72636SAmrani, Ram while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
11134dd72636SAmrani, Ram msleep(QEDR_DESTROY_CQ_ITER_DURATION);
11144dd72636SAmrani, Ram iter--;
11154dd72636SAmrani, Ram }
11164dd72636SAmrani, Ram
11174dd72636SAmrani, Ram /* Note that we don't need to have explicit code to wait for the
11184dd72636SAmrani, Ram * completion of the event handler because it is invoked from the EQ.
11194dd72636SAmrani, Ram * Since the destroy CQ ramrod has also been received on the EQ we can
11204dd72636SAmrani, Ram * be certain that there's no event handler in process.
11214dd72636SAmrani, Ram */
112243d781b9SLeon Romanovsky return 0;
1123a7efd777SRam Amrani }
1124cecbcddfSRam Amrani
1125cecbcddfSRam Amrani static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1126cecbcddfSRam Amrani struct ib_qp_attr *attr,
1127cecbcddfSRam Amrani int attr_mask,
1128cecbcddfSRam Amrani struct qed_rdma_modify_qp_in_params
1129cecbcddfSRam Amrani *qp_params)
1130cecbcddfSRam Amrani {
113147ec3866SParav Pandit const struct ib_gid_attr *gid_attr;
1132cecbcddfSRam Amrani enum rdma_network_type nw_type;
1133d8966fcdSDasaratharaman Chandramouli const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1134cecbcddfSRam Amrani u32 ipv4_addr;
1135a70c0739SParav Pandit int ret;
1136cecbcddfSRam Amrani int i;
1137cecbcddfSRam Amrani
113847ec3866SParav Pandit gid_attr = grh->sgid_attr;
1139a70c0739SParav Pandit ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1140a70c0739SParav Pandit if (ret)
1141a70c0739SParav Pandit return ret;
1142cecbcddfSRam Amrani
114347ec3866SParav Pandit nw_type = rdma_gid_attr_network_type(gid_attr);
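	/* The network type of the source GID selects the RoCE flavor
	 * (v1, or v2 over IPv4/IPv6) that is programmed into the QP.
	 */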
1144cecbcddfSRam Amrani switch (nw_type) {
1145cecbcddfSRam Amrani case RDMA_NETWORK_IPV6:
114647ec3866SParav Pandit memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1147cecbcddfSRam Amrani sizeof(qp_params->sgid));
1148cecbcddfSRam Amrani memcpy(&qp_params->dgid.bytes[0],
1149d8966fcdSDasaratharaman Chandramouli &grh->dgid,
1150cecbcddfSRam Amrani sizeof(qp_params->dgid));
1151cecbcddfSRam Amrani qp_params->roce_mode = ROCE_V2_IPV6;
1152cecbcddfSRam Amrani SET_FIELD(qp_params->modify_flags,
1153cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1154cecbcddfSRam Amrani break;
11551c15b4f2SAvihai Horon case RDMA_NETWORK_ROCE_V1:
115647ec3866SParav Pandit memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1157cecbcddfSRam Amrani sizeof(qp_params->sgid));
1158cecbcddfSRam Amrani memcpy(&qp_params->dgid.bytes[0],
1159d8966fcdSDasaratharaman Chandramouli &grh->dgid,
1160cecbcddfSRam Amrani sizeof(qp_params->dgid));
1161cecbcddfSRam Amrani qp_params->roce_mode = ROCE_V1;
1162cecbcddfSRam Amrani break;
1163cecbcddfSRam Amrani case RDMA_NETWORK_IPV4:
1164cecbcddfSRam Amrani memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1165cecbcddfSRam Amrani memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
116647ec3866SParav Pandit ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1167cecbcddfSRam Amrani qp_params->sgid.ipv4_addr = ipv4_addr;
1168cecbcddfSRam Amrani ipv4_addr =
1169d8966fcdSDasaratharaman Chandramouli qedr_get_ipv4_from_gid(grh->dgid.raw);
1170cecbcddfSRam Amrani qp_params->dgid.ipv4_addr = ipv4_addr;
1171cecbcddfSRam Amrani SET_FIELD(qp_params->modify_flags,
1172cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1173cecbcddfSRam Amrani qp_params->roce_mode = ROCE_V2_IPV4;
1174cecbcddfSRam Amrani break;
11751c15b4f2SAvihai Horon default:
11761c15b4f2SAvihai Horon return -EINVAL;
1177cecbcddfSRam Amrani }
1178cecbcddfSRam Amrani
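	/* The GIDs are kept in network byte order; swap each dword into the
	 * host order expected by the qed interface.
	 */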
1179cecbcddfSRam Amrani for (i = 0; i < 4; i++) {
1180cecbcddfSRam Amrani qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1181cecbcddfSRam Amrani qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1182cecbcddfSRam Amrani }
1183cecbcddfSRam Amrani
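	/* A vlan_id of VLAN_CFI_MASK or above indicates that no valid VLAN
	 * tag is present, so treat the traffic as untagged.
	 */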
1184cecbcddfSRam Amrani if (qp_params->vlan_id >= VLAN_CFI_MASK)
1185cecbcddfSRam Amrani qp_params->vlan_id = 0;
1186cecbcddfSRam Amrani
1187cecbcddfSRam Amrani return 0;
1188cecbcddfSRam Amrani }
1189cecbcddfSRam Amrani
1190cecbcddfSRam Amrani static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1191e00b64f7SShamir Rabinovitch struct ib_qp_init_attr *attrs,
1192e00b64f7SShamir Rabinovitch struct ib_udata *udata)
1193cecbcddfSRam Amrani {
1194cecbcddfSRam Amrani struct qedr_device_attr *qattr = &dev->attr;
1195cecbcddfSRam Amrani
1196cecbcddfSRam Amrani /* QP0... attrs->qp_type == IB_QPT_GSI */
119706e8d1dfSYuval Basson if (attrs->qp_type != IB_QPT_RC &&
119806e8d1dfSYuval Basson attrs->qp_type != IB_QPT_GSI &&
119906e8d1dfSYuval Basson attrs->qp_type != IB_QPT_XRC_INI &&
120006e8d1dfSYuval Basson attrs->qp_type != IB_QPT_XRC_TGT) {
1201cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP,
1202cecbcddfSRam Amrani "create qp: unsupported qp type=0x%x requested\n",
1203cecbcddfSRam Amrani attrs->qp_type);
1204bb8865f4SKamal Heib return -EOPNOTSUPP;
1205cecbcddfSRam Amrani }
1206cecbcddfSRam Amrani
1207cecbcddfSRam Amrani if (attrs->cap.max_send_wr > qattr->max_sqe) {
1208cecbcddfSRam Amrani DP_ERR(dev,
1209cecbcddfSRam Amrani "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1210cecbcddfSRam Amrani attrs->cap.max_send_wr, qattr->max_sqe);
1211cecbcddfSRam Amrani return -EINVAL;
1212cecbcddfSRam Amrani }
1213cecbcddfSRam Amrani
1214cecbcddfSRam Amrani if (attrs->cap.max_inline_data > qattr->max_inline) {
1215cecbcddfSRam Amrani DP_ERR(dev,
1216cecbcddfSRam Amrani "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1217cecbcddfSRam Amrani attrs->cap.max_inline_data, qattr->max_inline);
1218cecbcddfSRam Amrani return -EINVAL;
1219cecbcddfSRam Amrani }
1220cecbcddfSRam Amrani
1221cecbcddfSRam Amrani if (attrs->cap.max_send_sge > qattr->max_sge) {
1222cecbcddfSRam Amrani DP_ERR(dev,
1223cecbcddfSRam Amrani "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1224cecbcddfSRam Amrani attrs->cap.max_send_sge, qattr->max_sge);
1225cecbcddfSRam Amrani return -EINVAL;
1226cecbcddfSRam Amrani }
1227cecbcddfSRam Amrani
1228cecbcddfSRam Amrani if (attrs->cap.max_recv_sge > qattr->max_sge) {
1229cecbcddfSRam Amrani DP_ERR(dev,
1230cecbcddfSRam Amrani "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1231cecbcddfSRam Amrani attrs->cap.max_recv_sge, qattr->max_sge);
1232cecbcddfSRam Amrani return -EINVAL;
1233cecbcddfSRam Amrani }
1234cecbcddfSRam Amrani
123506e8d1dfSYuval Basson /* verify consumer QPs are not trying to use GSI QP's CQ.
123606e8d1dfSYuval Basson * TGT QP isn't associated with RQ/SQ
123706e8d1dfSYuval Basson */
123806e8d1dfSYuval Basson if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1239e1ad897bSKamal Heib (attrs->qp_type != IB_QPT_XRC_TGT) &&
1240e1ad897bSKamal Heib (attrs->qp_type != IB_QPT_XRC_INI)) {
124106e8d1dfSYuval Basson struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
124206e8d1dfSYuval Basson struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
124306e8d1dfSYuval Basson
124406e8d1dfSYuval Basson if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
124506e8d1dfSYuval Basson (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
124606e8d1dfSYuval Basson DP_ERR(dev,
124706e8d1dfSYuval Basson "create qp: consumer QP cannot use GSI CQs.\n");
124806e8d1dfSYuval Basson return -EINVAL;
124906e8d1dfSYuval Basson }
125006e8d1dfSYuval Basson }
125106e8d1dfSYuval Basson
1252cecbcddfSRam Amrani return 0;
1253cecbcddfSRam Amrani }
1254cecbcddfSRam Amrani
125540b173ddSYuval Bason static int qedr_copy_srq_uresp(struct qedr_dev *dev,
125640b173ddSYuval Bason struct qedr_srq *srq, struct ib_udata *udata)
125740b173ddSYuval Bason {
125840b173ddSYuval Bason struct qedr_create_srq_uresp uresp = {};
125940b173ddSYuval Bason int rc;
126040b173ddSYuval Bason
126140b173ddSYuval Bason uresp.srq_id = srq->srq_id;
126240b173ddSYuval Bason
126340b173ddSYuval Bason rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
126440b173ddSYuval Bason if (rc)
126540b173ddSYuval Bason DP_ERR(dev, "create srq: problem copying data to user space\n");
126640b173ddSYuval Bason
126740b173ddSYuval Bason return rc;
126840b173ddSYuval Bason }
126940b173ddSYuval Bason
127069ad0e7fSKalderon, Michal static void qedr_copy_rq_uresp(struct qedr_dev *dev,
127169ad0e7fSKalderon, Michal struct qedr_create_qp_uresp *uresp,
1272cecbcddfSRam Amrani struct qedr_qp *qp)
1273cecbcddfSRam Amrani {
127469ad0e7fSKalderon, Michal /* iWARP requires two doorbells per RQ. */
127569ad0e7fSKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
127669ad0e7fSKalderon, Michal uresp->rq_db_offset =
127769ad0e7fSKalderon, Michal DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
127869ad0e7fSKalderon, Michal uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
127969ad0e7fSKalderon, Michal } else {
128069ad0e7fSKalderon, Michal uresp->rq_db_offset =
128169ad0e7fSKalderon, Michal DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
128269ad0e7fSKalderon, Michal }
128369ad0e7fSKalderon, Michal
1284cecbcddfSRam Amrani uresp->rq_icid = qp->icid;
1285a25984f3SMichal Kalderon if (qp->urq.db_mmap_entry)
1286a25984f3SMichal Kalderon uresp->rq_db_rec_addr =
1287a25984f3SMichal Kalderon rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1288cecbcddfSRam Amrani }
1289cecbcddfSRam Amrani
129069ad0e7fSKalderon, Michal static void qedr_copy_sq_uresp(struct qedr_dev *dev,
129169ad0e7fSKalderon, Michal struct qedr_create_qp_uresp *uresp,
1292cecbcddfSRam Amrani struct qedr_qp *qp)
1293cecbcddfSRam Amrani {
1294cecbcddfSRam Amrani uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
129569ad0e7fSKalderon, Michal
129669ad0e7fSKalderon, Michal /* iWARP uses the same cid for rq and sq */
129769ad0e7fSKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1))
129869ad0e7fSKalderon, Michal uresp->sq_icid = qp->icid;
129969ad0e7fSKalderon, Michal else
1300cecbcddfSRam Amrani uresp->sq_icid = qp->icid + 1;
130197f61250SMichal Kalderon
1302a25984f3SMichal Kalderon if (qp->usq.db_mmap_entry)
130397f61250SMichal Kalderon uresp->sq_db_rec_addr =
130497f61250SMichal Kalderon rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1305cecbcddfSRam Amrani }
1306cecbcddfSRam Amrani
1307cecbcddfSRam Amrani static int qedr_copy_qp_uresp(struct qedr_dev *dev,
130897f61250SMichal Kalderon struct qedr_qp *qp, struct ib_udata *udata,
130997f61250SMichal Kalderon struct qedr_create_qp_uresp *uresp)
1310cecbcddfSRam Amrani {
1311cecbcddfSRam Amrani int rc;
1312cecbcddfSRam Amrani
131397f61250SMichal Kalderon memset(uresp, 0, sizeof(*uresp));
131406e8d1dfSYuval Basson
131506e8d1dfSYuval Basson if (qedr_qp_has_sq(qp))
131697f61250SMichal Kalderon qedr_copy_sq_uresp(dev, uresp, qp);
131706e8d1dfSYuval Basson
131806e8d1dfSYuval Basson if (qedr_qp_has_rq(qp))
131997f61250SMichal Kalderon qedr_copy_rq_uresp(dev, uresp, qp);
1320cecbcddfSRam Amrani
132197f61250SMichal Kalderon uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
132297f61250SMichal Kalderon uresp->qp_id = qp->qp_id;
1323cecbcddfSRam Amrani
132497f61250SMichal Kalderon rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1325cecbcddfSRam Amrani if (rc)
1326cecbcddfSRam Amrani DP_ERR(dev,
1327cecbcddfSRam Amrani "create qp: failed a copy to user space with qp icid=0x%x.\n",
1328cecbcddfSRam Amrani qp->icid);
1329cecbcddfSRam Amrani
1330cecbcddfSRam Amrani return rc;
1331cecbcddfSRam Amrani }
1332cecbcddfSRam Amrani
13336ef793cbSPrabhakar Kushwaha static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
13346ef793cbSPrabhakar Kushwaha {
13356ef793cbSPrabhakar Kushwaha qed_chain_reset(&qph->pbl);
13366ef793cbSPrabhakar Kushwaha qph->prod = 0;
13376ef793cbSPrabhakar Kushwaha qph->cons = 0;
13386ef793cbSPrabhakar Kushwaha qph->wqe_cons = 0;
13396ef793cbSPrabhakar Kushwaha qph->db_data.data.value = cpu_to_le16(0);
13406ef793cbSPrabhakar Kushwaha }
13416ef793cbSPrabhakar Kushwaha
1342df158561SAmrani, Ram static void qedr_set_common_qp_params(struct qedr_dev *dev,
1343cecbcddfSRam Amrani struct qedr_qp *qp,
1344cecbcddfSRam Amrani struct qedr_pd *pd,
1345cecbcddfSRam Amrani struct ib_qp_init_attr *attrs)
1346cecbcddfSRam Amrani {
1347cecbcddfSRam Amrani spin_lock_init(&qp->q_lock);
134882af6d19SMichal Kalderon if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
134982af6d19SMichal Kalderon kref_init(&qp->refcnt);
135082af6d19SMichal Kalderon init_completion(&qp->iwarp_cm_comp);
135160fab107SPrabhakar Kushwaha init_completion(&qp->qp_rel_comp);
135282af6d19SMichal Kalderon }
135306e8d1dfSYuval Basson
1354df158561SAmrani, Ram qp->pd = pd;
1355cecbcddfSRam Amrani qp->qp_type = attrs->qp_type;
1356cecbcddfSRam Amrani qp->max_inline_data = attrs->cap.max_inline_data;
1357cecbcddfSRam Amrani qp->state = QED_ROCE_QP_STATE_RESET;
13586ef793cbSPrabhakar Kushwaha
13596ef793cbSPrabhakar Kushwaha qp->prev_wqe_size = 0;
13606ef793cbSPrabhakar Kushwaha
1361272bba19SRuan Jinjie qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1362cecbcddfSRam Amrani qp->dev = dev;
136306e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
13646ef793cbSPrabhakar Kushwaha qedr_reset_qp_hwq_info(&qp->sq);
136506e8d1dfSYuval Basson qp->sq.max_sges = attrs->cap.max_send_sge;
136606e8d1dfSYuval Basson qp->sq_cq = get_qedr_cq(attrs->send_cq);
136706e8d1dfSYuval Basson DP_DEBUG(dev, QEDR_MSG_QP,
136806e8d1dfSYuval Basson "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
136906e8d1dfSYuval Basson qp->sq.max_sges, qp->sq_cq->icid);
137006e8d1dfSYuval Basson }
1371cecbcddfSRam Amrani
137206e8d1dfSYuval Basson if (attrs->srq)
13733491c9e7SYuval Bason qp->srq = get_qedr_srq(attrs->srq);
137406e8d1dfSYuval Basson
137506e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
13766ef793cbSPrabhakar Kushwaha qedr_reset_qp_hwq_info(&qp->rq);
13773491c9e7SYuval Bason qp->rq_cq = get_qedr_cq(attrs->recv_cq);
13783491c9e7SYuval Bason qp->rq.max_sges = attrs->cap.max_recv_sge;
1379cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP,
1380df158561SAmrani, Ram "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1381df158561SAmrani, Ram qp->rq.max_sges, qp->rq_cq->icid);
13823491c9e7SYuval Bason }
13833491c9e7SYuval Bason
1384df158561SAmrani, Ram DP_DEBUG(dev, QEDR_MSG_QP,
1385cecbcddfSRam Amrani "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1386cecbcddfSRam Amrani pd->pd_id, qp->qp_type, qp->max_inline_data,
1387cecbcddfSRam Amrani qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1388cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP,
1389cecbcddfSRam Amrani "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1390cecbcddfSRam Amrani qp->sq.max_sges, qp->sq_cq->icid);
1391cecbcddfSRam Amrani }
1392cecbcddfSRam Amrani
139397f61250SMichal Kalderon static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1394cecbcddfSRam Amrani {
139506e8d1dfSYuval Basson int rc = 0;
139697f61250SMichal Kalderon
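	/* Register the kernel SQ/RQ doorbells with the doorbell recovery
	 * mechanism so they can be replayed if doorbells are dropped.
	 */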
139706e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
1398cecbcddfSRam Amrani qp->sq.db = dev->db_addr +
1399cecbcddfSRam Amrani DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1400cecbcddfSRam Amrani qp->sq.db_data.data.icid = qp->icid + 1;
140106e8d1dfSYuval Basson rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
140206e8d1dfSYuval Basson DB_REC_WIDTH_32B, DB_REC_KERNEL);
140397f61250SMichal Kalderon if (rc)
140497f61250SMichal Kalderon return rc;
140506e8d1dfSYuval Basson }
140697f61250SMichal Kalderon
140706e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
1408cecbcddfSRam Amrani qp->rq.db = dev->db_addr +
1409cecbcddfSRam Amrani DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1410cecbcddfSRam Amrani qp->rq.db_data.data.icid = qp->icid;
141106e8d1dfSYuval Basson rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
141206e8d1dfSYuval Basson DB_REC_WIDTH_32B, DB_REC_KERNEL);
141306e8d1dfSYuval Basson if (rc && qedr_qp_has_sq(qp))
141406e8d1dfSYuval Basson qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1415cecbcddfSRam Amrani }
141697f61250SMichal Kalderon
141797f61250SMichal Kalderon return rc;
14183491c9e7SYuval Bason }
14193491c9e7SYuval Bason
142068e326deSLeon Romanovsky static int qedr_check_srq_params(struct qedr_dev *dev,
14213491c9e7SYuval Bason struct ib_srq_init_attr *attrs,
14223491c9e7SYuval Bason struct ib_udata *udata)
14233491c9e7SYuval Bason {
14243491c9e7SYuval Bason struct qedr_device_attr *qattr = &dev->attr;
14253491c9e7SYuval Bason
14263491c9e7SYuval Bason if (attrs->attr.max_wr > qattr->max_srq_wr) {
14273491c9e7SYuval Bason DP_ERR(dev,
14283491c9e7SYuval Bason "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
14293491c9e7SYuval Bason attrs->attr.max_wr, qattr->max_srq_wr);
14303491c9e7SYuval Bason return -EINVAL;
14313491c9e7SYuval Bason }
14323491c9e7SYuval Bason
14333491c9e7SYuval Bason if (attrs->attr.max_sge > qattr->max_sge) {
14343491c9e7SYuval Bason DP_ERR(dev,
14353491c9e7SYuval Bason "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
14363491c9e7SYuval Bason attrs->attr.max_sge, qattr->max_sge);
143706e8d1dfSYuval Basson return -EINVAL;
143806e8d1dfSYuval Basson
143906e8d1dfSYuval Basson if (!udata && attrs->srq_type == IB_SRQT_XRC) {
144006e8d1dfSYuval Basson DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
14413491c9e7SYuval Bason return -EINVAL;
14423491c9e7SYuval Bason }
14433491c9e7SYuval Bason
14443491c9e7SYuval Bason return 0;
14453491c9e7SYuval Bason }
14463491c9e7SYuval Bason
144740b173ddSYuval Bason static void qedr_free_srq_user_params(struct qedr_srq *srq)
144840b173ddSYuval Bason {
144940b173ddSYuval Bason qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
145040b173ddSYuval Bason ib_umem_release(srq->usrq.umem);
145140b173ddSYuval Bason ib_umem_release(srq->prod_umem);
145240b173ddSYuval Bason }
145340b173ddSYuval Bason
14543491c9e7SYuval Bason static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
14553491c9e7SYuval Bason {
14563491c9e7SYuval Bason struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
14573491c9e7SYuval Bason struct qedr_dev *dev = srq->dev;
14583491c9e7SYuval Bason
14593491c9e7SYuval Bason dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
14603491c9e7SYuval Bason
14613491c9e7SYuval Bason dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
14623491c9e7SYuval Bason hw_srq->virt_prod_pair_addr,
14633491c9e7SYuval Bason hw_srq->phy_prod_pair_addr);
14643491c9e7SYuval Bason }
14653491c9e7SYuval Bason
1466b0ea0fa5SJason Gunthorpe static int qedr_init_srq_user_params(struct ib_udata *udata,
146740b173ddSYuval Bason struct qedr_srq *srq,
146840b173ddSYuval Bason struct qedr_create_srq_ureq *ureq,
146972b894b0SChristoph Hellwig int access)
147040b173ddSYuval Bason {
147140b173ddSYuval Bason struct scatterlist *sg;
147240b173ddSYuval Bason int rc;
147340b173ddSYuval Bason
1474b0ea0fa5SJason Gunthorpe rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
147572b894b0SChristoph Hellwig ureq->srq_len, false, access, 1);
147640b173ddSYuval Bason if (rc)
147740b173ddSYuval Bason return rc;
147840b173ddSYuval Bason
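	/* Pin the user-space producer pair as well; its DMA address is later
	 * handed to the FW as the SRQ producer location.
	 */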
1479c320e527SMoni Shoua srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
148072b894b0SChristoph Hellwig sizeof(struct rdma_srq_producers), access);
148140b173ddSYuval Bason if (IS_ERR(srq->prod_umem)) {
148240b173ddSYuval Bason qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
148340b173ddSYuval Bason ib_umem_release(srq->usrq.umem);
148440b173ddSYuval Bason DP_ERR(srq->dev,
148540b173ddSYuval Bason "create srq: failed ib_umem_get for producer, got %ld\n",
148640b173ddSYuval Bason PTR_ERR(srq->prod_umem));
148740b173ddSYuval Bason return PTR_ERR(srq->prod_umem);
148840b173ddSYuval Bason }
148940b173ddSYuval Bason
149079fbd3e1SMaor Gottlieb sg = srq->prod_umem->sgt_append.sgt.sgl;
149140b173ddSYuval Bason srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
149240b173ddSYuval Bason
149340b173ddSYuval Bason return 0;
149440b173ddSYuval Bason }
149540b173ddSYuval Bason
14963491c9e7SYuval Bason static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
14973491c9e7SYuval Bason struct qedr_dev *dev,
14983491c9e7SYuval Bason struct ib_srq_init_attr *init_attr)
14993491c9e7SYuval Bason {
15003491c9e7SYuval Bason struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1501b6db3f71SAlexander Lobakin struct qed_chain_init_params params = {
1502b6db3f71SAlexander Lobakin .mode = QED_CHAIN_MODE_PBL,
1503b6db3f71SAlexander Lobakin .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1504b6db3f71SAlexander Lobakin .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1505b6db3f71SAlexander Lobakin .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1506b6db3f71SAlexander Lobakin };
15073491c9e7SYuval Bason dma_addr_t phy_prod_pair_addr;
15083491c9e7SYuval Bason u32 num_elems;
15093491c9e7SYuval Bason void *va;
15103491c9e7SYuval Bason int rc;
15113491c9e7SYuval Bason
15123491c9e7SYuval Bason va = dma_alloc_coherent(&dev->pdev->dev,
15133491c9e7SYuval Bason sizeof(struct rdma_srq_producers),
15143491c9e7SYuval Bason &phy_prod_pair_addr, GFP_KERNEL);
15153491c9e7SYuval Bason if (!va) {
15163491c9e7SYuval Bason DP_ERR(dev,
15173491c9e7SYuval Bason "create srq: failed to allocate dma memory for producer\n");
15183491c9e7SYuval Bason return -ENOMEM;
15193491c9e7SYuval Bason }
15203491c9e7SYuval Bason
15213491c9e7SYuval Bason hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
15223491c9e7SYuval Bason hw_srq->virt_prod_pair_addr = va;
15233491c9e7SYuval Bason
15243491c9e7SYuval Bason num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1525b6db3f71SAlexander Lobakin params.num_elems = num_elems;
1526b6db3f71SAlexander Lobakin
1527b6db3f71SAlexander Lobakin rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, ¶ms);
15283491c9e7SYuval Bason if (rc)
15293491c9e7SYuval Bason goto err0;
15303491c9e7SYuval Bason
15313491c9e7SYuval Bason hw_srq->num_elems = num_elems;
15323491c9e7SYuval Bason
15333491c9e7SYuval Bason return 0;
15343491c9e7SYuval Bason
15353491c9e7SYuval Bason err0:
15363491c9e7SYuval Bason dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
15373491c9e7SYuval Bason va, phy_prod_pair_addr);
15383491c9e7SYuval Bason return rc;
15393491c9e7SYuval Bason }
15403491c9e7SYuval Bason
154168e326deSLeon Romanovsky int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
15423491c9e7SYuval Bason struct ib_udata *udata)
15433491c9e7SYuval Bason {
15443491c9e7SYuval Bason struct qed_rdma_destroy_srq_in_params destroy_in_params;
15453491c9e7SYuval Bason struct qed_rdma_create_srq_in_params in_params = {};
154668e326deSLeon Romanovsky struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
15473491c9e7SYuval Bason struct qed_rdma_create_srq_out_params out_params;
154868e326deSLeon Romanovsky struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
154940b173ddSYuval Bason struct qedr_create_srq_ureq ureq = {};
15503491c9e7SYuval Bason u64 pbl_base_addr, phy_prod_pair_addr;
15513491c9e7SYuval Bason struct qedr_srq_hwq_info *hw_srq;
15523491c9e7SYuval Bason u32 page_cnt, page_size;
155368e326deSLeon Romanovsky struct qedr_srq *srq = get_qedr_srq(ibsrq);
15543491c9e7SYuval Bason int rc = 0;
15553491c9e7SYuval Bason
15563491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_QP,
15573491c9e7SYuval Bason "create SRQ called from %s (pd %p)\n",
15583491c9e7SYuval Bason (udata) ? "User lib" : "kernel", pd);
15593491c9e7SYuval Bason
1560652caba5SJason Gunthorpe if (init_attr->srq_type != IB_SRQT_BASIC &&
1561652caba5SJason Gunthorpe init_attr->srq_type != IB_SRQT_XRC)
1562652caba5SJason Gunthorpe return -EOPNOTSUPP;
1563652caba5SJason Gunthorpe
156468e326deSLeon Romanovsky rc = qedr_check_srq_params(dev, init_attr, udata);
15653491c9e7SYuval Bason if (rc)
156668e326deSLeon Romanovsky return -EINVAL;
15673491c9e7SYuval Bason
15683491c9e7SYuval Bason srq->dev = dev;
156906e8d1dfSYuval Basson srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
15703491c9e7SYuval Bason hw_srq = &srq->hw_srq;
15713491c9e7SYuval Bason spin_lock_init(&srq->lock);
15723491c9e7SYuval Bason
15733491c9e7SYuval Bason hw_srq->max_wr = init_attr->attr.max_wr;
15743491c9e7SYuval Bason hw_srq->max_sges = init_attr->attr.max_sge;
15753491c9e7SYuval Bason
157689944450SShamir Rabinovitch if (udata) {
157797f61250SMichal Kalderon if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
157897f61250SMichal Kalderon udata->inlen))) {
157940b173ddSYuval Bason DP_ERR(dev,
158040b173ddSYuval Bason "create srq: problem copying data from user space\n");
158140b173ddSYuval Bason goto err0;
158240b173ddSYuval Bason }
158340b173ddSYuval Bason
158472b894b0SChristoph Hellwig rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
158540b173ddSYuval Bason if (rc)
158640b173ddSYuval Bason goto err0;
158740b173ddSYuval Bason
158840b173ddSYuval Bason page_cnt = srq->usrq.pbl_info.num_pbes;
158940b173ddSYuval Bason pbl_base_addr = srq->usrq.pbl_tbl->pa;
159040b173ddSYuval Bason phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
159195ad233fSShiraz, Saleem page_size = PAGE_SIZE;
159240b173ddSYuval Bason } else {
159340b173ddSYuval Bason struct qed_chain *pbl;
159440b173ddSYuval Bason
15953491c9e7SYuval Bason rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
15963491c9e7SYuval Bason if (rc)
15973491c9e7SYuval Bason goto err0;
15983491c9e7SYuval Bason
15993491c9e7SYuval Bason pbl = &hw_srq->pbl;
16003491c9e7SYuval Bason page_cnt = qed_chain_get_page_cnt(pbl);
16013491c9e7SYuval Bason pbl_base_addr = qed_chain_get_pbl_phys(pbl);
16023491c9e7SYuval Bason phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
16033491c9e7SYuval Bason page_size = QED_CHAIN_PAGE_SIZE;
160440b173ddSYuval Bason }
160540b173ddSYuval Bason
16063491c9e7SYuval Bason in_params.pd_id = pd->pd_id;
16073491c9e7SYuval Bason in_params.pbl_base_addr = pbl_base_addr;
16083491c9e7SYuval Bason in_params.prod_pair_addr = phy_prod_pair_addr;
16093491c9e7SYuval Bason in_params.num_pages = page_cnt;
16103491c9e7SYuval Bason in_params.page_size = page_size;
161106e8d1dfSYuval Basson if (srq->is_xrc) {
161206e8d1dfSYuval Basson struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
161306e8d1dfSYuval Basson struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
161406e8d1dfSYuval Basson
161506e8d1dfSYuval Basson in_params.is_xrc = 1;
161606e8d1dfSYuval Basson in_params.xrcd_id = xrcd->xrcd_id;
161706e8d1dfSYuval Basson in_params.cq_cid = cq->icid;
161806e8d1dfSYuval Basson }
16193491c9e7SYuval Bason
16203491c9e7SYuval Bason rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
16213491c9e7SYuval Bason if (rc)
16223491c9e7SYuval Bason goto err1;
16233491c9e7SYuval Bason
16243491c9e7SYuval Bason srq->srq_id = out_params.srq_id;
16253491c9e7SYuval Bason
162640b173ddSYuval Bason if (udata) {
162740b173ddSYuval Bason rc = qedr_copy_srq_uresp(dev, srq, udata);
162840b173ddSYuval Bason if (rc)
162940b173ddSYuval Bason goto err2;
163040b173ddSYuval Bason }
163140b173ddSYuval Bason
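	/* Publish the SRQ in the xarray so it can later be looked up by
	 * srq_id, e.g. when asynchronous events are dispatched.
	 */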
16329fd15987SMatthew Wilcox rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
16333491c9e7SYuval Bason if (rc)
16343491c9e7SYuval Bason goto err2;
16353491c9e7SYuval Bason
16363491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ,
16373491c9e7SYuval Bason "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
163868e326deSLeon Romanovsky return 0;
16393491c9e7SYuval Bason
16403491c9e7SYuval Bason err2:
16413491c9e7SYuval Bason destroy_in_params.srq_id = srq->srq_id;
16423491c9e7SYuval Bason
16433491c9e7SYuval Bason dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
16443491c9e7SYuval Bason err1:
164540b173ddSYuval Bason if (udata)
164640b173ddSYuval Bason qedr_free_srq_user_params(srq);
164740b173ddSYuval Bason else
16483491c9e7SYuval Bason qedr_free_srq_kernel_params(srq);
16493491c9e7SYuval Bason err0:
165068e326deSLeon Romanovsky return -EFAULT;
16513491c9e7SYuval Bason }
16523491c9e7SYuval Bason
1653119181d1SLeon Romanovsky int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
16543491c9e7SYuval Bason {
16553491c9e7SYuval Bason struct qed_rdma_destroy_srq_in_params in_params = {};
16563491c9e7SYuval Bason struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
16573491c9e7SYuval Bason struct qedr_srq *srq = get_qedr_srq(ibsrq);
16583491c9e7SYuval Bason
16599fd15987SMatthew Wilcox xa_erase_irq(&dev->srqs, srq->srq_id);
16603491c9e7SYuval Bason in_params.srq_id = srq->srq_id;
166106e8d1dfSYuval Basson in_params.is_xrc = srq->is_xrc;
16623491c9e7SYuval Bason dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
16633491c9e7SYuval Bason
1664e00b64f7SShamir Rabinovitch if (ibsrq->uobject)
166540b173ddSYuval Bason qedr_free_srq_user_params(srq);
166640b173ddSYuval Bason else
16673491c9e7SYuval Bason qedr_free_srq_kernel_params(srq);
16683491c9e7SYuval Bason
16693491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ,
16703491c9e7SYuval Bason "destroy srq: destroyed srq with srq_id=0x%0x\n",
16713491c9e7SYuval Bason srq->srq_id);
1672119181d1SLeon Romanovsky return 0;
16733491c9e7SYuval Bason }
16743491c9e7SYuval Bason
16753491c9e7SYuval Bason int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
16763491c9e7SYuval Bason enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
16773491c9e7SYuval Bason {
16783491c9e7SYuval Bason struct qed_rdma_modify_srq_in_params in_params = {};
16793491c9e7SYuval Bason struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
16803491c9e7SYuval Bason struct qedr_srq *srq = get_qedr_srq(ibsrq);
16813491c9e7SYuval Bason int rc;
16823491c9e7SYuval Bason
16833491c9e7SYuval Bason if (attr_mask & IB_SRQ_MAX_WR) {
16843491c9e7SYuval Bason DP_ERR(dev,
16853491c9e7SYuval Bason "modify srq: invalid attribute mask=0x%x specified for %p\n",
16863491c9e7SYuval Bason attr_mask, srq);
16873491c9e7SYuval Bason return -EINVAL;
16883491c9e7SYuval Bason }
16893491c9e7SYuval Bason
16903491c9e7SYuval Bason if (attr_mask & IB_SRQ_LIMIT) {
16913491c9e7SYuval Bason if (attr->srq_limit >= srq->hw_srq.max_wr) {
16923491c9e7SYuval Bason DP_ERR(dev,
16933491c9e7SYuval Bason "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
16943491c9e7SYuval Bason attr->srq_limit, srq->hw_srq.max_wr);
16953491c9e7SYuval Bason return -EINVAL;
16963491c9e7SYuval Bason }
16973491c9e7SYuval Bason
16983491c9e7SYuval Bason in_params.srq_id = srq->srq_id;
16993491c9e7SYuval Bason in_params.wqe_limit = attr->srq_limit;
17003491c9e7SYuval Bason rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
17013491c9e7SYuval Bason if (rc)
17023491c9e7SYuval Bason return rc;
17033491c9e7SYuval Bason }
17043491c9e7SYuval Bason
17053491c9e7SYuval Bason srq->srq_limit = attr->srq_limit;
17063491c9e7SYuval Bason
17073491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ,
17083491c9e7SYuval Bason "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
17093491c9e7SYuval Bason
17103491c9e7SYuval Bason return 0;
17113491c9e7SYuval Bason }
1712cecbcddfSRam Amrani
171306e8d1dfSYuval Basson static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
171406e8d1dfSYuval Basson {
171506e8d1dfSYuval Basson switch (ib_qp_type) {
171606e8d1dfSYuval Basson case IB_QPT_RC:
171706e8d1dfSYuval Basson return QED_RDMA_QP_TYPE_RC;
171806e8d1dfSYuval Basson case IB_QPT_XRC_INI:
171906e8d1dfSYuval Basson return QED_RDMA_QP_TYPE_XRC_INI;
172006e8d1dfSYuval Basson case IB_QPT_XRC_TGT:
172106e8d1dfSYuval Basson return QED_RDMA_QP_TYPE_XRC_TGT;
172206e8d1dfSYuval Basson default:
172306e8d1dfSYuval Basson return QED_RDMA_QP_TYPE_INVAL;
172406e8d1dfSYuval Basson }
172506e8d1dfSYuval Basson }
172606e8d1dfSYuval Basson
1727cecbcddfSRam Amrani static inline void
1728df158561SAmrani, Ram qedr_init_common_qp_in_params(struct qedr_dev *dev,
1729cecbcddfSRam Amrani struct qedr_pd *pd,
1730cecbcddfSRam Amrani struct qedr_qp *qp,
1731cecbcddfSRam Amrani struct ib_qp_init_attr *attrs,
1732df158561SAmrani, Ram bool fmr_and_reserved_lkey,
1733cecbcddfSRam Amrani struct qed_rdma_create_qp_in_params *params)
1734cecbcddfSRam Amrani {
1735cecbcddfSRam Amrani /* QP handle to be written in an async event */
1736cecbcddfSRam Amrani params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1737cecbcddfSRam Amrani params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1738cecbcddfSRam Amrani
1739cecbcddfSRam Amrani params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1740df158561SAmrani, Ram params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
174106e8d1dfSYuval Basson params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
174206e8d1dfSYuval Basson params->stats_queue = 0;
174306e8d1dfSYuval Basson
174406e8d1dfSYuval Basson if (pd) {
1745cecbcddfSRam Amrani params->pd = pd->pd_id;
1746cecbcddfSRam Amrani params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
174706e8d1dfSYuval Basson }
17483491c9e7SYuval Bason
174906e8d1dfSYuval Basson if (qedr_qp_has_sq(qp))
175006e8d1dfSYuval Basson params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
175106e8d1dfSYuval Basson
175206e8d1dfSYuval Basson if (qedr_qp_has_rq(qp))
17533491c9e7SYuval Bason params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
17543491c9e7SYuval Bason
175506e8d1dfSYuval Basson if (qedr_qp_has_srq(qp)) {
17563491c9e7SYuval Bason params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
17573491c9e7SYuval Bason params->srq_id = qp->srq->srq_id;
17583491c9e7SYuval Bason params->use_srq = true;
175906e8d1dfSYuval Basson } else {
176006e8d1dfSYuval Basson params->srq_id = 0;
176106e8d1dfSYuval Basson params->use_srq = false;
17623491c9e7SYuval Bason }
1763cecbcddfSRam Amrani }
1764cecbcddfSRam Amrani
1765cecbcddfSRam Amrani static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1766cecbcddfSRam Amrani {
1767df158561SAmrani, Ram DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1768df158561SAmrani, Ram "qp=%p. "
1769df158561SAmrani, Ram "sq_addr=0x%llx, "
1770df158561SAmrani, Ram "sq_len=%zd, "
1771df158561SAmrani, Ram "rq_addr=0x%llx, "
1772df158561SAmrani, Ram "rq_len=%zd"
1773df158561SAmrani, Ram "\n",
1774df158561SAmrani, Ram qp,
177506e8d1dfSYuval Basson qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
177606e8d1dfSYuval Basson qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
177706e8d1dfSYuval Basson qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
177806e8d1dfSYuval Basson qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
1779cecbcddfSRam Amrani }
1780cecbcddfSRam Amrani
178169ad0e7fSKalderon, Michal static inline void
178269ad0e7fSKalderon, Michal qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
178369ad0e7fSKalderon, Michal struct qedr_qp *qp,
178469ad0e7fSKalderon, Michal struct qed_rdma_create_qp_out_params *out_params)
178569ad0e7fSKalderon, Michal {
178669ad0e7fSKalderon, Michal qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
178769ad0e7fSKalderon, Michal qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
178869ad0e7fSKalderon, Michal
178969ad0e7fSKalderon, Michal qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
179069ad0e7fSKalderon, Michal &qp->usq.pbl_info, FW_PAGE_SHIFT);
179140b173ddSYuval Bason if (!qp->srq) {
179269ad0e7fSKalderon, Michal qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
179369ad0e7fSKalderon, Michal qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
179440b173ddSYuval Bason }
179569ad0e7fSKalderon, Michal
179669ad0e7fSKalderon, Michal qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
179769ad0e7fSKalderon, Michal &qp->urq.pbl_info, FW_PAGE_SHIFT);
179869ad0e7fSKalderon, Michal }
179969ad0e7fSKalderon, Michal
180097f61250SMichal Kalderon static void qedr_cleanup_user(struct qedr_dev *dev,
180197f61250SMichal Kalderon struct qedr_ucontext *ctx,
180297f61250SMichal Kalderon struct qedr_qp *qp)
1803cecbcddfSRam Amrani {
180406e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
1805df158561SAmrani, Ram ib_umem_release(qp->usq.umem);
1806df158561SAmrani, Ram qp->usq.umem = NULL;
180706e8d1dfSYuval Basson }
1808df158561SAmrani, Ram
180906e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
1810df158561SAmrani, Ram ib_umem_release(qp->urq.umem);
1811df158561SAmrani, Ram qp->urq.umem = NULL;
181206e8d1dfSYuval Basson }
181324e412c1SMichal Kalderon
181424e412c1SMichal Kalderon if (rdma_protocol_roce(&dev->ibdev, 1)) {
181524e412c1SMichal Kalderon qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
181624e412c1SMichal Kalderon qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
181724e412c1SMichal Kalderon } else {
181824e412c1SMichal Kalderon kfree(qp->usq.pbl_tbl);
181924e412c1SMichal Kalderon kfree(qp->urq.pbl_tbl);
182024e412c1SMichal Kalderon }
182197f61250SMichal Kalderon
182297f61250SMichal Kalderon if (qp->usq.db_rec_data) {
182397f61250SMichal Kalderon qedr_db_recovery_del(dev, qp->usq.db_addr,
182497f61250SMichal Kalderon &qp->usq.db_rec_data->db_data);
182597f61250SMichal Kalderon rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
182697f61250SMichal Kalderon }
182797f61250SMichal Kalderon
182897f61250SMichal Kalderon if (qp->urq.db_rec_data) {
182997f61250SMichal Kalderon qedr_db_recovery_del(dev, qp->urq.db_addr,
183097f61250SMichal Kalderon &qp->urq.db_rec_data->db_data);
183197f61250SMichal Kalderon rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
183297f61250SMichal Kalderon }
1833b4bc7660SMichal Kalderon
1834b4bc7660SMichal Kalderon if (rdma_protocol_iwarp(&dev->ibdev, 1))
1835b4bc7660SMichal Kalderon qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1836b4bc7660SMichal Kalderon &qp->urq.db_rec_db2_data);
1837df158561SAmrani, Ram }
1838df158561SAmrani, Ram
1839df158561SAmrani, Ram static int qedr_create_user_qp(struct qedr_dev *dev,
1840df158561SAmrani, Ram struct qedr_qp *qp,
1841df158561SAmrani, Ram struct ib_pd *ibpd,
1842df158561SAmrani, Ram struct ib_udata *udata,
1843df158561SAmrani, Ram struct ib_qp_init_attr *attrs)
1844df158561SAmrani, Ram {
1845df158561SAmrani, Ram struct qed_rdma_create_qp_in_params in_params;
1846df158561SAmrani, Ram struct qed_rdma_create_qp_out_params out_params;
184706e8d1dfSYuval Basson struct qedr_create_qp_uresp uresp = {};
184806e8d1dfSYuval Basson struct qedr_create_qp_ureq ureq = {};
184969ad0e7fSKalderon, Michal int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
185006e8d1dfSYuval Basson struct qedr_ucontext *ctx = NULL;
185106e8d1dfSYuval Basson struct qedr_pd *pd = NULL;
185206e8d1dfSYuval Basson int rc = 0;
1853df158561SAmrani, Ram
185482af6d19SMichal Kalderon qp->create_type = QEDR_QP_CREATE_USER;
185506e8d1dfSYuval Basson
185606e8d1dfSYuval Basson if (ibpd) {
185706e8d1dfSYuval Basson pd = get_qedr_pd(ibpd);
185806e8d1dfSYuval Basson ctx = pd->uctx;
185906e8d1dfSYuval Basson }
186006e8d1dfSYuval Basson
186106e8d1dfSYuval Basson if (udata) {
186206e8d1dfSYuval Basson rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
186306e8d1dfSYuval Basson udata->inlen));
1864df158561SAmrani, Ram if (rc) {
1865df158561SAmrani, Ram DP_ERR(dev, "Problem copying data from user space\n");
1866df158561SAmrani, Ram return rc;
1867df158561SAmrani, Ram }
186806e8d1dfSYuval Basson }
1869cecbcddfSRam Amrani
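	/* Pin and map the user-provided SQ/RQ buffers. For RoCE the PBLs are
	 * allocated and populated here; for iWARP they are filled in later
	 * from the FW-provided PBL (see qedr_iwarp_populate_user_qp()).
	 */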
187006e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
187172b894b0SChristoph Hellwig /* SQ - read access only (0) */
1872b0ea0fa5SJason Gunthorpe rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
187372b894b0SChristoph Hellwig ureq.sq_len, true, 0, alloc_and_init);
1874cecbcddfSRam Amrani if (rc)
1875cecbcddfSRam Amrani return rc;
187606e8d1dfSYuval Basson }
1877cecbcddfSRam Amrani
187806e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
187972b894b0SChristoph Hellwig /* RQ - read access only (0) */
1880b0ea0fa5SJason Gunthorpe rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
188172b894b0SChristoph Hellwig ureq.rq_len, true, 0, alloc_and_init);
1882*95175ddaSKamal Heib if (rc) {
1883*95175ddaSKamal Heib ib_umem_release(qp->usq.umem);
1884*95175ddaSKamal Heib qp->usq.umem = NULL;
1885*95175ddaSKamal Heib if (rdma_protocol_roce(&dev->ibdev, 1)) {
1886*95175ddaSKamal Heib qedr_free_pbl(dev, &qp->usq.pbl_info,
1887*95175ddaSKamal Heib qp->usq.pbl_tbl);
1888*95175ddaSKamal Heib } else {
1889*95175ddaSKamal Heib kfree(qp->usq.pbl_tbl);
1890*95175ddaSKamal Heib }
1891df158561SAmrani, Ram return rc;
189240b173ddSYuval Bason }
1893*95175ddaSKamal Heib }
1894df158561SAmrani, Ram
1895df158561SAmrani, Ram memset(&in_params, 0, sizeof(in_params));
1896df158561SAmrani, Ram qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1897df158561SAmrani, Ram in_params.qp_handle_lo = ureq.qp_handle_lo;
1898df158561SAmrani, Ram in_params.qp_handle_hi = ureq.qp_handle_hi;
189906e8d1dfSYuval Basson
190006e8d1dfSYuval Basson if (qp->qp_type == IB_QPT_XRC_TGT) {
190106e8d1dfSYuval Basson struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
190206e8d1dfSYuval Basson
190306e8d1dfSYuval Basson in_params.xrcd_id = xrcd->xrcd_id;
190406e8d1dfSYuval Basson in_params.qp_handle_lo = qp->qp_id;
190506e8d1dfSYuval Basson in_params.use_srq = 1;
190606e8d1dfSYuval Basson }
190706e8d1dfSYuval Basson
190806e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
1909df158561SAmrani, Ram in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1910df158561SAmrani, Ram in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
191106e8d1dfSYuval Basson }
191206e8d1dfSYuval Basson
191306e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
1914df158561SAmrani, Ram in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1915df158561SAmrani, Ram in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
191640b173ddSYuval Bason }
1917df158561SAmrani, Ram
1918bbe4f424SMichal Kalderon if (ctx)
1919bbe4f424SMichal Kalderon SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1920bbe4f424SMichal Kalderon
1921df158561SAmrani, Ram qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1922df158561SAmrani, Ram &in_params, &out_params);
1923df158561SAmrani, Ram
1924df158561SAmrani, Ram if (!qp->qed_qp) {
1925df158561SAmrani, Ram rc = -ENOMEM;
1926df158561SAmrani, Ram goto err1;
1927df158561SAmrani, Ram }
1928df158561SAmrani, Ram
192969ad0e7fSKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1))
193069ad0e7fSKalderon, Michal qedr_iwarp_populate_user_qp(dev, qp, &out_params);
193169ad0e7fSKalderon, Michal
1932df158561SAmrani, Ram qp->qp_id = out_params.qp_id;
1933df158561SAmrani, Ram qp->icid = out_params.icid;
1934df158561SAmrani, Ram
193506e8d1dfSYuval Basson if (udata) {
193697f61250SMichal Kalderon rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
193797f61250SMichal Kalderon if (rc)
193897f61250SMichal Kalderon goto err;
1939b4bc7660SMichal Kalderon }
1940b4bc7660SMichal Kalderon
194106e8d1dfSYuval Basson /* db offset was calculated in copy_qp_uresp, now set in the user q */
194206e8d1dfSYuval Basson if (qedr_qp_has_sq(qp)) {
194306e8d1dfSYuval Basson qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1944b1a4da64SKamal Heib qp->sq.max_wr = attrs->cap.max_send_wr;
194597f61250SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
194697f61250SMichal Kalderon &qp->usq.db_rec_data->db_data,
194797f61250SMichal Kalderon DB_REC_WIDTH_32B,
194897f61250SMichal Kalderon DB_REC_USER);
194997f61250SMichal Kalderon if (rc)
195097f61250SMichal Kalderon goto err;
195106e8d1dfSYuval Basson }
195297f61250SMichal Kalderon
195306e8d1dfSYuval Basson if (qedr_qp_has_rq(qp)) {
195406e8d1dfSYuval Basson qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1955b1a4da64SKamal Heib qp->rq.max_wr = attrs->cap.max_recv_wr;
195697f61250SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
195797f61250SMichal Kalderon &qp->urq.db_rec_data->db_data,
195897f61250SMichal Kalderon DB_REC_WIDTH_32B,
195997f61250SMichal Kalderon DB_REC_USER);
1960df158561SAmrani, Ram if (rc)
1961df158561SAmrani, Ram goto err;
196206e8d1dfSYuval Basson }
1963df158561SAmrani, Ram
1964b4bc7660SMichal Kalderon if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
19650191c271SAlok Prasad qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
19660191c271SAlok Prasad
19670191c271SAlok Prasad /* The db_rec_db2 data is constant, so calculate it here; there is no
19680191c271SAlok Prasad * need to receive it from user space.
19690191c271SAlok Prasad */
19700191c271SAlok Prasad qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
19710191c271SAlok Prasad qp->urq.db_rec_db2_data.data.value =
19720191c271SAlok Prasad cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
19730191c271SAlok Prasad
1974b4bc7660SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1975b4bc7660SMichal Kalderon &qp->urq.db_rec_db2_data,
1976b4bc7660SMichal Kalderon DB_REC_WIDTH_32B,
1977b4bc7660SMichal Kalderon DB_REC_USER);
1978b4bc7660SMichal Kalderon if (rc)
1979b4bc7660SMichal Kalderon goto err;
1980b4bc7660SMichal Kalderon }
1981df158561SAmrani, Ram qedr_qp_user_print(dev, qp);
198297f61250SMichal Kalderon return rc;
1983df158561SAmrani, Ram err:
1984df158561SAmrani, Ram rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1985df158561SAmrani, Ram if (rc)
1986df158561SAmrani, Ram DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1987df158561SAmrani, Ram
1988df158561SAmrani, Ram err1:
198997f61250SMichal Kalderon qedr_cleanup_user(dev, ctx, qp);
1990cecbcddfSRam Amrani return rc;
1991cecbcddfSRam Amrani }
1992cecbcddfSRam Amrani
199397f61250SMichal Kalderon static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1994f5b1b177SKalderon, Michal {
199597f61250SMichal Kalderon int rc;
199697f61250SMichal Kalderon
1997f5b1b177SKalderon, Michal qp->sq.db = dev->db_addr +
1998f5b1b177SKalderon, Michal DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1999f5b1b177SKalderon, Michal qp->sq.db_data.data.icid = qp->icid;
2000f5b1b177SKalderon, Michal
200197f61250SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->sq.db,
200297f61250SMichal Kalderon &qp->sq.db_data,
200397f61250SMichal Kalderon DB_REC_WIDTH_32B,
200497f61250SMichal Kalderon DB_REC_KERNEL);
200597f61250SMichal Kalderon if (rc)
200697f61250SMichal Kalderon return rc;
200797f61250SMichal Kalderon
2008f5b1b177SKalderon, Michal qp->rq.db = dev->db_addr +
2009f5b1b177SKalderon, Michal DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2010f5b1b177SKalderon, Michal qp->rq.db_data.data.icid = qp->icid;
2011f5b1b177SKalderon, Michal qp->rq.iwarp_db2 = dev->db_addr +
2012f5b1b177SKalderon, Michal DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2013f5b1b177SKalderon, Michal qp->rq.iwarp_db2_data.data.icid = qp->icid;
2014f5b1b177SKalderon, Michal qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
201597f61250SMichal Kalderon
201697f61250SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->rq.db,
201797f61250SMichal Kalderon &qp->rq.db_data,
201897f61250SMichal Kalderon DB_REC_WIDTH_32B,
201997f61250SMichal Kalderon DB_REC_KERNEL);
2020b4bc7660SMichal Kalderon if (rc)
2021b4bc7660SMichal Kalderon return rc;
2022b4bc7660SMichal Kalderon
2023b4bc7660SMichal Kalderon rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2024b4bc7660SMichal Kalderon &qp->rq.iwarp_db2_data,
2025b4bc7660SMichal Kalderon DB_REC_WIDTH_32B,
2026b4bc7660SMichal Kalderon DB_REC_KERNEL);
202797f61250SMichal Kalderon return rc;
2028f5b1b177SKalderon, Michal }
2029f5b1b177SKalderon, Michal
2030df158561SAmrani, Ram static int
2031df158561SAmrani, Ram qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2032cecbcddfSRam Amrani struct qedr_qp *qp,
2033df158561SAmrani, Ram struct qed_rdma_create_qp_in_params *in_params,
2034df158561SAmrani, Ram u32 n_sq_elems, u32 n_rq_elems)
2035cecbcddfSRam Amrani {
2036df158561SAmrani, Ram struct qed_rdma_create_qp_out_params out_params;
2037b6db3f71SAlexander Lobakin struct qed_chain_init_params params = {
2038b6db3f71SAlexander Lobakin .mode = QED_CHAIN_MODE_PBL,
2039b6db3f71SAlexander Lobakin .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2040b6db3f71SAlexander Lobakin };
2041cecbcddfSRam Amrani int rc;
2042cecbcddfSRam Amrani
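	/* Kernel QPs own their rings: allocate the SQ and RQ as PBL-backed
	 * qed chains and pass their page counts and PBL addresses to the FW.
	 */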
2043b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2044b6db3f71SAlexander Lobakin params.num_elems = n_sq_elems;
2045b6db3f71SAlexander Lobakin params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2046df158561SAmrani, Ram
2047b6db3f71SAlexander Lobakin rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms);
2048df158561SAmrani, Ram if (rc)
2049cecbcddfSRam Amrani return rc;
2050df158561SAmrani, Ram
2051df158561SAmrani, Ram in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2052df158561SAmrani, Ram in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2053df158561SAmrani, Ram
2054b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
205535406697SColin Ian King params.num_elems = n_rq_elems;
2056b6db3f71SAlexander Lobakin params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2057b6db3f71SAlexander Lobakin
2058b6db3f71SAlexander Lobakin rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms);
2059df158561SAmrani, Ram if (rc)
2060df158561SAmrani, Ram return rc;
2061df158561SAmrani, Ram
2062df158561SAmrani, Ram in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2063df158561SAmrani, Ram in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2064df158561SAmrani, Ram
2065df158561SAmrani, Ram qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2066df158561SAmrani, Ram in_params, &out_params);
2067df158561SAmrani, Ram
2068df158561SAmrani, Ram if (!qp->qed_qp)
2069df158561SAmrani, Ram return -EINVAL;
2070df158561SAmrani, Ram
2071df158561SAmrani, Ram qp->qp_id = out_params.qp_id;
2072df158561SAmrani, Ram qp->icid = out_params.icid;
2073df158561SAmrani, Ram
207497f61250SMichal Kalderon return qedr_set_roce_db_info(dev, qp);
2075f5b1b177SKalderon, Michal }
2076df158561SAmrani, Ram
2077f5b1b177SKalderon, Michal static int
2078f5b1b177SKalderon, Michal qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2079f5b1b177SKalderon, Michal struct qedr_qp *qp,
2080f5b1b177SKalderon, Michal struct qed_rdma_create_qp_in_params *in_params,
2081f5b1b177SKalderon, Michal u32 n_sq_elems, u32 n_rq_elems)
2082f5b1b177SKalderon, Michal {
2083f5b1b177SKalderon, Michal struct qed_rdma_create_qp_out_params out_params;
2084b6db3f71SAlexander Lobakin struct qed_chain_init_params params = {
2085b6db3f71SAlexander Lobakin .mode = QED_CHAIN_MODE_PBL,
2086b6db3f71SAlexander Lobakin .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2087b6db3f71SAlexander Lobakin };
2088f5b1b177SKalderon, Michal int rc;
2089f5b1b177SKalderon, Michal
2090f5b1b177SKalderon, Michal in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2091f5b1b177SKalderon, Michal QEDR_SQE_ELEMENT_SIZE,
209215506586SAlexander Lobakin QED_CHAIN_PAGE_SIZE,
2093f5b1b177SKalderon, Michal QED_CHAIN_MODE_PBL);
2094f5b1b177SKalderon, Michal in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2095f5b1b177SKalderon, Michal QEDR_RQE_ELEMENT_SIZE,
209615506586SAlexander Lobakin QED_CHAIN_PAGE_SIZE,
2097f5b1b177SKalderon, Michal QED_CHAIN_MODE_PBL);
2098f5b1b177SKalderon, Michal
2099f5b1b177SKalderon, Michal qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2100f5b1b177SKalderon, Michal in_params, &out_params);
2101f5b1b177SKalderon, Michal
2102f5b1b177SKalderon, Michal if (!qp->qed_qp)
2103f5b1b177SKalderon, Michal return -EINVAL;
2104f5b1b177SKalderon, Michal
2105f5b1b177SKalderon, Michal /* Now we allocate the chain */
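	/* For iWARP the FW supplies the PBL at create_qp time, so the chains
	 * are built on top of the external PBL (ext_pbl_virt/phys) instead of
	 * allocating a new one.
	 */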
2106f5b1b177SKalderon, Michal
2107b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2108b6db3f71SAlexander Lobakin params.num_elems = n_sq_elems;
2109b6db3f71SAlexander Lobakin params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2110b6db3f71SAlexander Lobakin params.ext_pbl_virt = out_params.sq_pbl_virt;
2111b6db3f71SAlexander Lobakin params.ext_pbl_phys = out_params.sq_pbl_phys;
2112f5b1b177SKalderon, Michal
2113b6db3f71SAlexander Lobakin rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms);
2114f5b1b177SKalderon, Michal if (rc)
2115f5b1b177SKalderon, Michal goto err;
2116f5b1b177SKalderon, Michal
2117b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2118b6db3f71SAlexander Lobakin params.num_elems = n_rq_elems;
2119b6db3f71SAlexander Lobakin params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2120b6db3f71SAlexander Lobakin params.ext_pbl_virt = out_params.rq_pbl_virt;
2121b6db3f71SAlexander Lobakin params.ext_pbl_phys = out_params.rq_pbl_phys;
2122f5b1b177SKalderon, Michal
2123b6db3f71SAlexander Lobakin rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms);
2124f5b1b177SKalderon, Michal if (rc)
2125f5b1b177SKalderon, Michal goto err;
2126f5b1b177SKalderon, Michal
2127f5b1b177SKalderon, Michal qp->qp_id = out_params.qp_id;
2128f5b1b177SKalderon, Michal qp->icid = out_params.icid;
2129f5b1b177SKalderon, Michal
213097f61250SMichal Kalderon return qedr_set_iwarp_db_info(dev, qp);
2131f5b1b177SKalderon, Michal
2132f5b1b177SKalderon, Michal err:
2133f5b1b177SKalderon, Michal dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2134f5b1b177SKalderon, Michal
2135f5b1b177SKalderon, Michal return rc;
2136cecbcddfSRam Amrani }
2137cecbcddfSRam Amrani
2138df158561SAmrani, Ram static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2139df158561SAmrani, Ram {
2140cecbcddfSRam Amrani dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2141df158561SAmrani, Ram kfree(qp->wqe_wr_id);
2142cecbcddfSRam Amrani
2143cecbcddfSRam Amrani dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2144df158561SAmrani, Ram kfree(qp->rqe_wr_id);
214597f61250SMichal Kalderon
214697f61250SMichal Kalderon /* The GSI QP is not registered with the doorbell recovery mechanism, so there is nothing to delete */
214797f61250SMichal Kalderon if (qp->qp_type == IB_QPT_GSI)
214897f61250SMichal Kalderon return;
214997f61250SMichal Kalderon
215097f61250SMichal Kalderon qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
215197f61250SMichal Kalderon
2152b4bc7660SMichal Kalderon if (!qp->srq) {
215397f61250SMichal Kalderon qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2154b4bc7660SMichal Kalderon
2155b4bc7660SMichal Kalderon if (rdma_protocol_iwarp(&dev->ibdev, 1))
2156b4bc7660SMichal Kalderon qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2157b4bc7660SMichal Kalderon &qp->rq.iwarp_db2_data);
2158b4bc7660SMichal Kalderon }
2159cecbcddfSRam Amrani }
2160cecbcddfSRam Amrani
2161df158561SAmrani, Ram static int qedr_create_kernel_qp(struct qedr_dev *dev,
2162df158561SAmrani, Ram struct qedr_qp *qp,
2163df158561SAmrani, Ram struct ib_pd *ibpd,
2164df158561SAmrani, Ram struct ib_qp_init_attr *attrs)
2165df158561SAmrani, Ram {
2166df158561SAmrani, Ram struct qed_rdma_create_qp_in_params in_params;
2167df158561SAmrani, Ram struct qedr_pd *pd = get_qedr_pd(ibpd);
2168df158561SAmrani, Ram int rc = -EINVAL;
2169df158561SAmrani, Ram u32 n_rq_elems;
2170df158561SAmrani, Ram u32 n_sq_elems;
2171df158561SAmrani, Ram u32 n_sq_entries;
2172df158561SAmrani, Ram
2173df158561SAmrani, Ram memset(&in_params, 0, sizeof(in_params));
217482af6d19SMichal Kalderon qp->create_type = QEDR_QP_CREATE_KERNEL;
2175df158561SAmrani, Ram
2176df158561SAmrani, Ram /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2177df158561SAmrani, Ram * the ring. The ring should allow at least a single WR, even if the
2178df158561SAmrani, Ram * user requested none, due to allocation issues.
2179df158561SAmrani, Ram * We should add an extra WR since the prod and cons indices of
2180df158561SAmrani, Ram * wqe_wr_id are managed in such a way that the WQ is considered full
2181df158561SAmrani, Ram * when (prod+1)%max_wr==cons. We currently don't do that because we
2182df158561SAmrani, Ram 	 * double the number of entries due to an iSER issue that pushes far more
2183df158561SAmrani, Ram * WRs than indicated. If we decline its ib_post_send() then we get
2184df158561SAmrani, Ram 	 * error prints in dmesg that we'd like to avoid.
2185df158561SAmrani, Ram */
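	/* For example, with max_wr = 4, prod = 3 and cons = 0 give
	 * (3 + 1) % 4 == 0 == cons, so the WQ is already treated as full
	 * while one slot is still unused.
	 */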
2186df158561SAmrani, Ram qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2187df158561SAmrani, Ram dev->attr.max_sqe);
2188df158561SAmrani, Ram
21896396bb22SKees Cook qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2190df158561SAmrani, Ram GFP_KERNEL);
2191df158561SAmrani, Ram if (!qp->wqe_wr_id) {
2192df158561SAmrani, Ram DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2193df158561SAmrani, Ram return -ENOMEM;
2194df158561SAmrani, Ram }
2195df158561SAmrani, Ram
2196df158561SAmrani, Ram /* QP handle to be written in CQE */
2197df158561SAmrani, Ram in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2198df158561SAmrani, Ram in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2199df158561SAmrani, Ram
2200df158561SAmrani, Ram /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2201df158561SAmrani, Ram 	 * the ring. The ring should allow at least a single WR, even if the
2202df158561SAmrani, Ram * user requested none, due to allocation issues.
2203df158561SAmrani, Ram */
2204df158561SAmrani, Ram qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2205df158561SAmrani, Ram
2206df158561SAmrani, Ram /* Allocate driver internal RQ array */
22076396bb22SKees Cook qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2208df158561SAmrani, Ram GFP_KERNEL);
2209df158561SAmrani, Ram if (!qp->rqe_wr_id) {
2210df158561SAmrani, Ram DP_ERR(dev,
2211df158561SAmrani, Ram "create qp: failed RQ shadow memory allocation\n");
2212df158561SAmrani, Ram kfree(qp->wqe_wr_id);
2213df158561SAmrani, Ram return -ENOMEM;
2214df158561SAmrani, Ram }
2215df158561SAmrani, Ram
2216df158561SAmrani, Ram qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2217df158561SAmrani, Ram
2218df158561SAmrani, Ram n_sq_entries = attrs->cap.max_send_wr;
2219df158561SAmrani, Ram n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2220df158561SAmrani, Ram n_sq_entries = max_t(u32, n_sq_entries, 1);
2221df158561SAmrani, Ram n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2222df158561SAmrani, Ram
2223df158561SAmrani, Ram n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2224df158561SAmrani, Ram
2225f5b1b177SKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1))
2226f5b1b177SKalderon, Michal rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2227f5b1b177SKalderon, Michal n_sq_elems, n_rq_elems);
2228f5b1b177SKalderon, Michal else
2229df158561SAmrani, Ram rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2230df158561SAmrani, Ram n_sq_elems, n_rq_elems);
2231df158561SAmrani, Ram if (rc)
2232df158561SAmrani, Ram qedr_cleanup_kernel(dev, qp);
2233df158561SAmrani, Ram
2234cecbcddfSRam Amrani return rc;
2235cecbcddfSRam Amrani }
2236cecbcddfSRam Amrani
22373e45410fSKeita Suzuki static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
22383e45410fSKeita Suzuki struct ib_udata *udata)
22393e45410fSKeita Suzuki {
22403e45410fSKeita Suzuki struct qedr_ucontext *ctx =
22413e45410fSKeita Suzuki rdma_udata_to_drv_context(udata, struct qedr_ucontext,
22423e45410fSKeita Suzuki ibucontext);
22433e45410fSKeita Suzuki int rc;
22443e45410fSKeita Suzuki
22453e45410fSKeita Suzuki if (qp->qp_type != IB_QPT_GSI) {
22463e45410fSKeita Suzuki rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
22473e45410fSKeita Suzuki if (rc)
22483e45410fSKeita Suzuki return rc;
22493e45410fSKeita Suzuki }
22503e45410fSKeita Suzuki
22513e45410fSKeita Suzuki if (qp->create_type == QEDR_QP_CREATE_USER)
22523e45410fSKeita Suzuki qedr_cleanup_user(dev, ctx, qp);
22533e45410fSKeita Suzuki else
22543e45410fSKeita Suzuki qedr_cleanup_kernel(dev, qp);
22553e45410fSKeita Suzuki
22563e45410fSKeita Suzuki return 0;
22573e45410fSKeita Suzuki }
22583e45410fSKeita Suzuki
2259514aee66SLeon Romanovsky int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2260cecbcddfSRam Amrani struct ib_udata *udata)
2261cecbcddfSRam Amrani {
226206e8d1dfSYuval Basson struct qedr_xrcd *xrcd = NULL;
2263514aee66SLeon Romanovsky struct ib_pd *ibpd = ibqp->pd;
2264514aee66SLeon Romanovsky struct qedr_pd *pd = get_qedr_pd(ibpd);
2265514aee66SLeon Romanovsky struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2266514aee66SLeon Romanovsky struct qedr_qp *qp = get_qedr_qp(ibqp);
2267cecbcddfSRam Amrani int rc = 0;
2268cecbcddfSRam Amrani
22691f11a761SJason Gunthorpe if (attrs->create_flags)
2270514aee66SLeon Romanovsky return -EOPNOTSUPP;
22711f11a761SJason Gunthorpe
2272514aee66SLeon Romanovsky if (attrs->qp_type == IB_QPT_XRC_TGT)
227306e8d1dfSYuval Basson xrcd = get_qedr_xrcd(attrs->xrcd);
2274514aee66SLeon Romanovsky else
227506e8d1dfSYuval Basson pd = get_qedr_pd(ibpd);
227606e8d1dfSYuval Basson
2277cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2278cecbcddfSRam Amrani udata ? "user library" : "kernel", pd);
2279cecbcddfSRam Amrani
2280e00b64f7SShamir Rabinovitch rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2281cecbcddfSRam Amrani if (rc)
2282514aee66SLeon Romanovsky return rc;
2283cecbcddfSRam Amrani
2284cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP,
2285df158561SAmrani, Ram 		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2286df158561SAmrani, Ram udata ? "user library" : "kernel", attrs->event_handler, pd,
2287cecbcddfSRam Amrani get_qedr_cq(attrs->send_cq),
2288cecbcddfSRam Amrani get_qedr_cq(attrs->send_cq)->icid,
2289cecbcddfSRam Amrani get_qedr_cq(attrs->recv_cq),
22903491c9e7SYuval Bason attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2291cecbcddfSRam Amrani
2292df158561SAmrani, Ram qedr_set_common_qp_params(dev, qp, pd, attrs);
2293cecbcddfSRam Amrani
2294514aee66SLeon Romanovsky if (attrs->qp_type == IB_QPT_GSI)
2295514aee66SLeon Romanovsky return qedr_create_gsi_qp(dev, attrs, qp);
229604886779SRam Amrani
229706e8d1dfSYuval Basson if (udata || xrcd)
2298df158561SAmrani, Ram rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2299df158561SAmrani, Ram else
2300df158561SAmrani, Ram rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2301cecbcddfSRam Amrani
2302cecbcddfSRam Amrani if (rc)
2303514aee66SLeon Romanovsky return rc;
2304cecbcddfSRam Amrani
2305cecbcddfSRam Amrani qp->ibqp.qp_num = qp->qp_id;
2306cecbcddfSRam Amrani
23071212767eSYuval Bason if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
23085fdff18bSMichal Kalderon rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2309de0089e6SKalderon, Michal if (rc)
23103e45410fSKeita Suzuki goto out_free_qp_resources;
23111212767eSYuval Bason }
2312de0089e6SKalderon, Michal
2313514aee66SLeon Romanovsky return 0;
2314cecbcddfSRam Amrani
23153e45410fSKeita Suzuki out_free_qp_resources:
23163e45410fSKeita Suzuki qedr_free_qp_resources(dev, qp, udata);
2317514aee66SLeon Romanovsky return -EFAULT;
2318cecbcddfSRam Amrani }
2319cecbcddfSRam Amrani
232027a4b1a6SRam Amrani static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2321cecbcddfSRam Amrani {
2322cecbcddfSRam Amrani switch (qp_state) {
2323cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RESET:
2324cecbcddfSRam Amrani return IB_QPS_RESET;
2325cecbcddfSRam Amrani case QED_ROCE_QP_STATE_INIT:
2326cecbcddfSRam Amrani return IB_QPS_INIT;
2327cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTR:
2328cecbcddfSRam Amrani return IB_QPS_RTR;
2329cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTS:
2330cecbcddfSRam Amrani return IB_QPS_RTS;
2331cecbcddfSRam Amrani case QED_ROCE_QP_STATE_SQD:
2332cecbcddfSRam Amrani return IB_QPS_SQD;
2333cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2334cecbcddfSRam Amrani return IB_QPS_ERR;
2335cecbcddfSRam Amrani case QED_ROCE_QP_STATE_SQE:
2336cecbcddfSRam Amrani return IB_QPS_SQE;
2337cecbcddfSRam Amrani }
2338cecbcddfSRam Amrani return IB_QPS_ERR;
2339cecbcddfSRam Amrani }
2340cecbcddfSRam Amrani
234127a4b1a6SRam Amrani static enum qed_roce_qp_state qedr_get_state_from_ibqp(
234227a4b1a6SRam Amrani enum ib_qp_state qp_state)
2343cecbcddfSRam Amrani {
2344cecbcddfSRam Amrani switch (qp_state) {
2345cecbcddfSRam Amrani case IB_QPS_RESET:
2346cecbcddfSRam Amrani return QED_ROCE_QP_STATE_RESET;
2347cecbcddfSRam Amrani case IB_QPS_INIT:
2348cecbcddfSRam Amrani return QED_ROCE_QP_STATE_INIT;
2349cecbcddfSRam Amrani case IB_QPS_RTR:
2350cecbcddfSRam Amrani return QED_ROCE_QP_STATE_RTR;
2351cecbcddfSRam Amrani case IB_QPS_RTS:
2352cecbcddfSRam Amrani return QED_ROCE_QP_STATE_RTS;
2353cecbcddfSRam Amrani case IB_QPS_SQD:
2354cecbcddfSRam Amrani return QED_ROCE_QP_STATE_SQD;
2355cecbcddfSRam Amrani case IB_QPS_ERR:
2356cecbcddfSRam Amrani return QED_ROCE_QP_STATE_ERR;
2357cecbcddfSRam Amrani default:
2358cecbcddfSRam Amrani return QED_ROCE_QP_STATE_ERR;
2359cecbcddfSRam Amrani }
2360cecbcddfSRam Amrani }
2361cecbcddfSRam Amrani
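/* Validate a qed QP state transition. The transitions accepted below are
 * RESET->INIT, INIT->RTR/ERR, RTR->RTS/ERR, RTS->SQD/ERR, SQD->RTS/ERR and
 * ERR->RESET, the last one only once both the SQ and RQ have been drained.
 */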
2362cecbcddfSRam Amrani static int qedr_update_qp_state(struct qedr_dev *dev,
2363cecbcddfSRam Amrani struct qedr_qp *qp,
2364caf61b1bSKalderon, Michal enum qed_roce_qp_state cur_state,
2365cecbcddfSRam Amrani enum qed_roce_qp_state new_state)
2366cecbcddfSRam Amrani {
2367cecbcddfSRam Amrani int status = 0;
2368cecbcddfSRam Amrani
2369caf61b1bSKalderon, Michal if (new_state == cur_state)
2370865cea40SRam Amrani return 0;
2371cecbcddfSRam Amrani
2372caf61b1bSKalderon, Michal switch (cur_state) {
2373cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RESET:
2374cecbcddfSRam Amrani switch (new_state) {
2375cecbcddfSRam Amrani case QED_ROCE_QP_STATE_INIT:
2376cecbcddfSRam Amrani break;
2377cecbcddfSRam Amrani default:
2378cecbcddfSRam Amrani status = -EINVAL;
2379cecbcddfSRam Amrani break;
2380790b57f6SYueHaibing }
2381cecbcddfSRam Amrani break;
2382cecbcddfSRam Amrani case QED_ROCE_QP_STATE_INIT:
2383cecbcddfSRam Amrani switch (new_state) {
2384cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTR:
2385cecbcddfSRam Amrani /* Update doorbell (in case post_recv was
2386cecbcddfSRam Amrani * done before move to RTR)
2387cecbcddfSRam Amrani */
2388f5b1b177SKalderon, Michal
2389f5b1b177SKalderon, Michal if (rdma_protocol_roce(&dev->ibdev, 1)) {
2390cecbcddfSRam Amrani writel(qp->rq.db_data.raw, qp->rq.db);
2391f5b1b177SKalderon, Michal }
2392cecbcddfSRam Amrani break;
2393cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2394cecbcddfSRam Amrani break;
2395cecbcddfSRam Amrani default:
2396cecbcddfSRam Amrani /* Invalid state change. */
2397cecbcddfSRam Amrani status = -EINVAL;
2398cecbcddfSRam Amrani break;
2399790b57f6SYueHaibing }
2400cecbcddfSRam Amrani break;
2401cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTR:
2402cecbcddfSRam Amrani /* RTR->XXX */
2403cecbcddfSRam Amrani switch (new_state) {
2404cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTS:
2405cecbcddfSRam Amrani break;
2406cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2407cecbcddfSRam Amrani break;
2408cecbcddfSRam Amrani default:
2409cecbcddfSRam Amrani /* Invalid state change. */
2410cecbcddfSRam Amrani status = -EINVAL;
2411cecbcddfSRam Amrani break;
2412790b57f6SYueHaibing }
2413cecbcddfSRam Amrani break;
2414cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTS:
2415cecbcddfSRam Amrani /* RTS->XXX */
2416cecbcddfSRam Amrani switch (new_state) {
2417cecbcddfSRam Amrani case QED_ROCE_QP_STATE_SQD:
2418cecbcddfSRam Amrani break;
2419cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2420cecbcddfSRam Amrani break;
2421cecbcddfSRam Amrani default:
2422cecbcddfSRam Amrani /* Invalid state change. */
2423cecbcddfSRam Amrani status = -EINVAL;
2424cecbcddfSRam Amrani break;
2425790b57f6SYueHaibing }
2426cecbcddfSRam Amrani break;
2427cecbcddfSRam Amrani case QED_ROCE_QP_STATE_SQD:
2428cecbcddfSRam Amrani /* SQD->XXX */
2429cecbcddfSRam Amrani switch (new_state) {
2430cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RTS:
2431cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2432cecbcddfSRam Amrani break;
2433cecbcddfSRam Amrani default:
2434cecbcddfSRam Amrani /* Invalid state change. */
2435cecbcddfSRam Amrani status = -EINVAL;
2436cecbcddfSRam Amrani break;
2437790b57f6SYueHaibing }
2438cecbcddfSRam Amrani break;
2439cecbcddfSRam Amrani case QED_ROCE_QP_STATE_ERR:
2440cecbcddfSRam Amrani /* ERR->XXX */
2441cecbcddfSRam Amrani switch (new_state) {
2442cecbcddfSRam Amrani case QED_ROCE_QP_STATE_RESET:
2443933e6dcaSRam Amrani if ((qp->rq.prod != qp->rq.cons) ||
2444933e6dcaSRam Amrani (qp->sq.prod != qp->sq.cons)) {
2445933e6dcaSRam Amrani DP_NOTICE(dev,
2446933e6dcaSRam Amrani "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2447933e6dcaSRam Amrani qp->rq.prod, qp->rq.cons, qp->sq.prod,
2448933e6dcaSRam Amrani qp->sq.cons);
2449933e6dcaSRam Amrani status = -EINVAL;
2450933e6dcaSRam Amrani }
2451cecbcddfSRam Amrani break;
2452cecbcddfSRam Amrani default:
2453cecbcddfSRam Amrani status = -EINVAL;
2454cecbcddfSRam Amrani break;
2455790b57f6SYueHaibing }
2456cecbcddfSRam Amrani break;
2457cecbcddfSRam Amrani default:
2458cecbcddfSRam Amrani status = -EINVAL;
2459cecbcddfSRam Amrani break;
2460790b57f6SYueHaibing }
2461cecbcddfSRam Amrani
2462cecbcddfSRam Amrani return status;
2463cecbcddfSRam Amrani }
2464cecbcddfSRam Amrani
2465cecbcddfSRam Amrani int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2466cecbcddfSRam Amrani int attr_mask, struct ib_udata *udata)
2467cecbcddfSRam Amrani {
2468cecbcddfSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
2469cecbcddfSRam Amrani struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2470cecbcddfSRam Amrani struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2471d8966fcdSDasaratharaman Chandramouli const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2472cecbcddfSRam Amrani enum ib_qp_state old_qp_state, new_qp_state;
2473caf61b1bSKalderon, Michal enum qed_roce_qp_state cur_state;
2474cecbcddfSRam Amrani int rc = 0;
2475cecbcddfSRam Amrani
2476cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP,
2477cecbcddfSRam Amrani "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2478cecbcddfSRam Amrani attr->qp_state);
2479cecbcddfSRam Amrani
248026e990baSJason Gunthorpe if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
248126e990baSJason Gunthorpe return -EOPNOTSUPP;
248226e990baSJason Gunthorpe
2483cecbcddfSRam Amrani old_qp_state = qedr_get_ibqp_state(qp->state);
2484cecbcddfSRam Amrani if (attr_mask & IB_QP_STATE)
2485cecbcddfSRam Amrani new_qp_state = attr->qp_state;
2486cecbcddfSRam Amrani else
2487cecbcddfSRam Amrani new_qp_state = old_qp_state;
2488cecbcddfSRam Amrani
2489f5b1b177SKalderon, Michal if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490f5b1b177SKalderon, Michal if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2491d31131bbSKamal Heib ibqp->qp_type, attr_mask)) {
2492cecbcddfSRam Amrani DP_ERR(dev,
2493cecbcddfSRam Amrani "modify qp: invalid attribute mask=0x%x specified for\n"
2494cecbcddfSRam Amrani "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495f5b1b177SKalderon, Michal attr_mask, qp->qp_id, ibqp->qp_type,
2496f5b1b177SKalderon, Michal old_qp_state, new_qp_state);
2497cecbcddfSRam Amrani rc = -EINVAL;
2498cecbcddfSRam Amrani goto err;
2499cecbcddfSRam Amrani }
2500f5b1b177SKalderon, Michal }
2501cecbcddfSRam Amrani
2502cecbcddfSRam Amrani /* Translate the masks... */
2503cecbcddfSRam Amrani if (attr_mask & IB_QP_STATE) {
2504cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2505cecbcddfSRam Amrani QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506cecbcddfSRam Amrani qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2507cecbcddfSRam Amrani }
2508cecbcddfSRam Amrani
2509cecbcddfSRam Amrani if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510cecbcddfSRam Amrani qp_params.sqd_async = true;
2511cecbcddfSRam Amrani
2512cecbcddfSRam Amrani if (attr_mask & IB_QP_PKEY_INDEX) {
2513cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2514cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515cecbcddfSRam Amrani if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2516cecbcddfSRam Amrani rc = -EINVAL;
2517cecbcddfSRam Amrani goto err;
2518cecbcddfSRam Amrani }
2519cecbcddfSRam Amrani
2520cecbcddfSRam Amrani qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2521cecbcddfSRam Amrani }
2522cecbcddfSRam Amrani
2523cecbcddfSRam Amrani if (attr_mask & IB_QP_QKEY)
2524cecbcddfSRam Amrani qp->qkey = attr->qkey;
2525cecbcddfSRam Amrani
2526cecbcddfSRam Amrani if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2528cecbcddfSRam Amrani QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529cecbcddfSRam Amrani qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530cecbcddfSRam Amrani IB_ACCESS_REMOTE_READ;
2531cecbcddfSRam Amrani qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532cecbcddfSRam Amrani IB_ACCESS_REMOTE_WRITE;
2533cecbcddfSRam Amrani qp_params.incoming_atomic_en = attr->qp_access_flags &
2534cecbcddfSRam Amrani IB_ACCESS_REMOTE_ATOMIC;
2535cecbcddfSRam Amrani }
2536cecbcddfSRam Amrani
2537cecbcddfSRam Amrani if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2538425cf5c1SKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1))
2539425cf5c1SKalderon, Michal return -EINVAL;
2540425cf5c1SKalderon, Michal
2541cecbcddfSRam Amrani if (attr_mask & IB_QP_PATH_MTU) {
2542cecbcddfSRam Amrani if (attr->path_mtu < IB_MTU_256 ||
2543cecbcddfSRam Amrani attr->path_mtu > IB_MTU_4096) {
2544cecbcddfSRam Amrani pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2545cecbcddfSRam Amrani rc = -EINVAL;
2546cecbcddfSRam Amrani goto err;
2547cecbcddfSRam Amrani }
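			/* Clamp the requested path MTU to the largest IB MTU
			 * the netdev MTU can carry; roughly, iboe_get_mtu()
			 * accounts for the RoCE transport headers.
			 */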
2548cecbcddfSRam Amrani qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2549cecbcddfSRam Amrani ib_mtu_enum_to_int(iboe_get_mtu
2550cecbcddfSRam Amrani (dev->ndev->mtu)));
2551cecbcddfSRam Amrani }
2552cecbcddfSRam Amrani
2553cecbcddfSRam Amrani if (!qp->mtu) {
2554cecbcddfSRam Amrani qp->mtu =
2555cecbcddfSRam Amrani ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556cecbcddfSRam Amrani pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2557cecbcddfSRam Amrani }
2558cecbcddfSRam Amrani
2559cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2560cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2561cecbcddfSRam Amrani
2562d8966fcdSDasaratharaman Chandramouli qp_params.traffic_class_tos = grh->traffic_class;
2563d8966fcdSDasaratharaman Chandramouli qp_params.flow_label = grh->flow_label;
2564d8966fcdSDasaratharaman Chandramouli qp_params.hop_limit_ttl = grh->hop_limit;
2565cecbcddfSRam Amrani
2566d8966fcdSDasaratharaman Chandramouli qp->sgid_idx = grh->sgid_index;
2567cecbcddfSRam Amrani
2568cecbcddfSRam Amrani rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2569cecbcddfSRam Amrani if (rc) {
2570cecbcddfSRam Amrani DP_ERR(dev,
2571cecbcddfSRam Amrani "modify qp: problems with GID index %d (rc=%d)\n",
2572d8966fcdSDasaratharaman Chandramouli grh->sgid_index, rc);
2573cecbcddfSRam Amrani return rc;
2574cecbcddfSRam Amrani }
2575cecbcddfSRam Amrani
2576cecbcddfSRam Amrani rc = qedr_get_dmac(dev, &attr->ah_attr,
2577cecbcddfSRam Amrani qp_params.remote_mac_addr);
2578cecbcddfSRam Amrani if (rc)
2579cecbcddfSRam Amrani return rc;
2580cecbcddfSRam Amrani
2581cecbcddfSRam Amrani qp_params.use_local_mac = true;
2582cecbcddfSRam Amrani ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2583cecbcddfSRam Amrani
2584cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585cecbcddfSRam Amrani qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586cecbcddfSRam Amrani qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588cecbcddfSRam Amrani qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589cecbcddfSRam Amrani qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591cecbcddfSRam Amrani qp_params.remote_mac_addr);
2592cecbcddfSRam Amrani
2593cecbcddfSRam Amrani qp_params.mtu = qp->mtu;
2594cecbcddfSRam Amrani qp_params.lb_indication = false;
2595cecbcddfSRam Amrani }
2596cecbcddfSRam Amrani
2597cecbcddfSRam Amrani if (!qp_params.mtu) {
2598cecbcddfSRam Amrani /* Stay with current MTU */
2599cecbcddfSRam Amrani if (qp->mtu)
2600cecbcddfSRam Amrani qp_params.mtu = qp->mtu;
2601cecbcddfSRam Amrani else
2602cecbcddfSRam Amrani qp_params.mtu =
2603cecbcddfSRam Amrani ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2604cecbcddfSRam Amrani }
2605cecbcddfSRam Amrani
2606cecbcddfSRam Amrani if (attr_mask & IB_QP_TIMEOUT) {
2607cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2608cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2609cecbcddfSRam Amrani
2610c3594f22SKalderon, Michal /* The received timeout value is an exponent used like this:
2611c3594f22SKalderon, Michal * "12.7.34 LOCAL ACK TIMEOUT
2612c3594f22SKalderon, Michal * Value representing the transport (ACK) timeout for use by
2613c3594f22SKalderon, Michal * the remote, expressed as: 4.096 * 2^timeout [usec]"
2614c3594f22SKalderon, Michal * The FW expects timeout in msec so we need to divide the usec
2615c3594f22SKalderon, Michal * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616c3594f22SKalderon, Michal * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617c3594f22SKalderon, Michal * The value of zero means infinite so we use a 'max_t' to make
2618c3594f22SKalderon, Michal * sure that sub 1 msec values will be configured as 1 msec.
2619c3594f22SKalderon, Michal */
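		/* For example, attr->timeout = 14 corresponds to
		 * 4.096 usec * 2^14 ~= 67 msec on the wire and is configured
		 * here as 2^(14 - 8) = 64 msec.
		 */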
2620c3594f22SKalderon, Michal if (attr->timeout)
2621c3594f22SKalderon, Michal qp_params.ack_timeout =
2622c3594f22SKalderon, Michal 1 << max_t(int, attr->timeout - 8, 0);
2623c3594f22SKalderon, Michal else
2624cecbcddfSRam Amrani qp_params.ack_timeout = 0;
2625118f7674SKamal Heib
2626118f7674SKamal Heib qp->timeout = attr->timeout;
2627cecbcddfSRam Amrani }
2628c3594f22SKalderon, Michal
2629cecbcddfSRam Amrani if (attr_mask & IB_QP_RETRY_CNT) {
2630cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2631cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2632cecbcddfSRam Amrani qp_params.retry_cnt = attr->retry_cnt;
2633cecbcddfSRam Amrani }
2634cecbcddfSRam Amrani
2635cecbcddfSRam Amrani if (attr_mask & IB_QP_RNR_RETRY) {
2636cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2637cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2638cecbcddfSRam Amrani qp_params.rnr_retry_cnt = attr->rnr_retry;
2639cecbcddfSRam Amrani }
2640cecbcddfSRam Amrani
2641cecbcddfSRam Amrani if (attr_mask & IB_QP_RQ_PSN) {
2642cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2643cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2644cecbcddfSRam Amrani qp_params.rq_psn = attr->rq_psn;
2645cecbcddfSRam Amrani qp->rq_psn = attr->rq_psn;
2646cecbcddfSRam Amrani }
2647cecbcddfSRam Amrani
2648cecbcddfSRam Amrani if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2649cecbcddfSRam Amrani if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2650cecbcddfSRam Amrani rc = -EINVAL;
2651cecbcddfSRam Amrani DP_ERR(dev,
2652cecbcddfSRam Amrani "unsupported max_rd_atomic=%d, supported=%d\n",
2653cecbcddfSRam Amrani attr->max_rd_atomic,
2654cecbcddfSRam Amrani dev->attr.max_qp_req_rd_atomic_resc);
2655cecbcddfSRam Amrani goto err;
2656cecbcddfSRam Amrani }
2657cecbcddfSRam Amrani
2658cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2659cecbcddfSRam Amrani QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2660cecbcddfSRam Amrani qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2661cecbcddfSRam Amrani }
2662cecbcddfSRam Amrani
2663cecbcddfSRam Amrani if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2664cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2665cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2666cecbcddfSRam Amrani qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2667cecbcddfSRam Amrani }
2668cecbcddfSRam Amrani
2669cecbcddfSRam Amrani if (attr_mask & IB_QP_SQ_PSN) {
2670cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2671cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2672cecbcddfSRam Amrani qp_params.sq_psn = attr->sq_psn;
2673cecbcddfSRam Amrani qp->sq_psn = attr->sq_psn;
2674cecbcddfSRam Amrani }
2675cecbcddfSRam Amrani
2676cecbcddfSRam Amrani if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2677cecbcddfSRam Amrani if (attr->max_dest_rd_atomic >
2678cecbcddfSRam Amrani dev->attr.max_qp_resp_rd_atomic_resc) {
2679cecbcddfSRam Amrani DP_ERR(dev,
2680cecbcddfSRam Amrani "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2681cecbcddfSRam Amrani attr->max_dest_rd_atomic,
2682cecbcddfSRam Amrani dev->attr.max_qp_resp_rd_atomic_resc);
2683cecbcddfSRam Amrani
2684cecbcddfSRam Amrani rc = -EINVAL;
2685cecbcddfSRam Amrani goto err;
2686cecbcddfSRam Amrani }
2687cecbcddfSRam Amrani
2688cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2689cecbcddfSRam Amrani QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2690cecbcddfSRam Amrani qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2691cecbcddfSRam Amrani }
2692cecbcddfSRam Amrani
2693cecbcddfSRam Amrani if (attr_mask & IB_QP_DEST_QPN) {
2694cecbcddfSRam Amrani SET_FIELD(qp_params.modify_flags,
2695cecbcddfSRam Amrani QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2696cecbcddfSRam Amrani
2697cecbcddfSRam Amrani qp_params.dest_qp = attr->dest_qp_num;
2698cecbcddfSRam Amrani qp->dest_qp_num = attr->dest_qp_num;
2699cecbcddfSRam Amrani }
2700cecbcddfSRam Amrani
2701caf61b1bSKalderon, Michal cur_state = qp->state;
2702caf61b1bSKalderon, Michal
2703caf61b1bSKalderon, Michal /* Update the QP state before the actual ramrod to prevent a race with
2704caf61b1bSKalderon, Michal * fast path. Modifying the QP state to error will cause the device to
2705caf61b1bSKalderon, Michal 	 * flush the CQEs, and while polling, the flushed CQEs will be considered
2706caf61b1bSKalderon, Michal * a potential issue if the QP isn't in error state.
2707caf61b1bSKalderon, Michal */
2708caf61b1bSKalderon, Michal if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2709caf61b1bSKalderon, Michal !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2710caf61b1bSKalderon, Michal qp->state = QED_ROCE_QP_STATE_ERR;
2711caf61b1bSKalderon, Michal
2712cecbcddfSRam Amrani if (qp->qp_type != IB_QPT_GSI)
2713cecbcddfSRam Amrani rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2714cecbcddfSRam Amrani qp->qed_qp, &qp_params);
2715cecbcddfSRam Amrani
2716cecbcddfSRam Amrani if (attr_mask & IB_QP_STATE) {
2717cecbcddfSRam Amrani if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2718caf61b1bSKalderon, Michal rc = qedr_update_qp_state(dev, qp, cur_state,
2719caf61b1bSKalderon, Michal qp_params.new_state);
2720cecbcddfSRam Amrani qp->state = qp_params.new_state;
2721cecbcddfSRam Amrani }
2722cecbcddfSRam Amrani
2723cecbcddfSRam Amrani err:
2724cecbcddfSRam Amrani return rc;
2725cecbcddfSRam Amrani }
2726cecbcddfSRam Amrani
2727cecbcddfSRam Amrani static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2728cecbcddfSRam Amrani {
2729cecbcddfSRam Amrani int ib_qp_acc_flags = 0;
2730cecbcddfSRam Amrani
2731cecbcddfSRam Amrani if (params->incoming_rdma_write_en)
2732cecbcddfSRam Amrani ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2733cecbcddfSRam Amrani if (params->incoming_rdma_read_en)
2734cecbcddfSRam Amrani ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2735cecbcddfSRam Amrani if (params->incoming_atomic_en)
2736cecbcddfSRam Amrani ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2737cecbcddfSRam Amrani ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2738cecbcddfSRam Amrani return ib_qp_acc_flags;
2739cecbcddfSRam Amrani }
2740cecbcddfSRam Amrani
2741cecbcddfSRam Amrani int qedr_query_qp(struct ib_qp *ibqp,
2742cecbcddfSRam Amrani struct ib_qp_attr *qp_attr,
2743cecbcddfSRam Amrani int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2744cecbcddfSRam Amrani {
2745cecbcddfSRam Amrani struct qed_rdma_query_qp_out_params params;
2746cecbcddfSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
2747cecbcddfSRam Amrani struct qedr_dev *dev = qp->dev;
2748cecbcddfSRam Amrani int rc = 0;
2749cecbcddfSRam Amrani
2750cecbcddfSRam Amrani 	memset(&params, 0, sizeof(params));
2751cecbcddfSRam Amrani memset(qp_attr, 0, sizeof(*qp_attr));
2752cecbcddfSRam Amrani memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2753cecbcddfSRam Amrani
27544f960393SAlok Prasad if (qp->qp_type != IB_QPT_GSI) {
27554f960393SAlok Prasad 		rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
27564f960393SAlok Prasad if (rc)
27574f960393SAlok Prasad goto err;
2758cecbcddfSRam Amrani qp_attr->qp_state = qedr_get_ibqp_state(params.state);
27594f960393SAlok Prasad } else {
27604f960393SAlok Prasad qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
27614f960393SAlok Prasad }
27624f960393SAlok Prasad
2763cecbcddfSRam Amrani qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2764097b6159SAmrani, Ram qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2765cecbcddfSRam Amrani qp_attr->path_mig_state = IB_MIG_MIGRATED;
2766cecbcddfSRam Amrani qp_attr->rq_psn = params.rq_psn;
2767cecbcddfSRam Amrani qp_attr->sq_psn = params.sq_psn;
2768cecbcddfSRam Amrani qp_attr->dest_qp_num = params.dest_qp;
2769cecbcddfSRam Amrani
2770cecbcddfSRam Amrani 	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2771cecbcddfSRam Amrani
2772cecbcddfSRam Amrani qp_attr->cap.max_send_wr = qp->sq.max_wr;
2773cecbcddfSRam Amrani qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2774cecbcddfSRam Amrani qp_attr->cap.max_send_sge = qp->sq.max_sges;
2775cecbcddfSRam Amrani qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2776fbf58026SMichal Kalderon qp_attr->cap.max_inline_data = dev->attr.max_inline;
2777cecbcddfSRam Amrani qp_init_attr->cap = qp_attr->cap;
2778cecbcddfSRam Amrani
277944c58487SDasaratharaman Chandramouli qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2780d8966fcdSDasaratharaman Chandramouli rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2781d8966fcdSDasaratharaman Chandramouli params.flow_label, qp->sgid_idx,
2782d8966fcdSDasaratharaman Chandramouli params.hop_limit_ttl, params.traffic_class_tos);
2783d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2784d8966fcdSDasaratharaman Chandramouli rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2785d8966fcdSDasaratharaman Chandramouli rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2786118f7674SKamal Heib qp_attr->timeout = qp->timeout;
2787cecbcddfSRam Amrani qp_attr->rnr_retry = params.rnr_retry;
2788cecbcddfSRam Amrani qp_attr->retry_cnt = params.retry_cnt;
2789cecbcddfSRam Amrani qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2790cecbcddfSRam Amrani qp_attr->pkey_index = params.pkey_index;
2791cecbcddfSRam Amrani qp_attr->port_num = 1;
2792d8966fcdSDasaratharaman Chandramouli rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2793d8966fcdSDasaratharaman Chandramouli rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2794cecbcddfSRam Amrani qp_attr->alt_pkey_index = 0;
2795cecbcddfSRam Amrani qp_attr->alt_port_num = 0;
2796cecbcddfSRam Amrani qp_attr->alt_timeout = 0;
2797cecbcddfSRam Amrani memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2798cecbcddfSRam Amrani
2799cecbcddfSRam Amrani qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2800cecbcddfSRam Amrani qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2801cecbcddfSRam Amrani qp_attr->max_rd_atomic = params.max_rd_atomic;
2802cecbcddfSRam Amrani qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2803cecbcddfSRam Amrani
2804cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2805cecbcddfSRam Amrani qp_attr->cap.max_inline_data);
2806cecbcddfSRam Amrani
2807cecbcddfSRam Amrani err:
2808cecbcddfSRam Amrani return rc;
2809cecbcddfSRam Amrani }
2810cecbcddfSRam Amrani
2811c4367a26SShamir Rabinovitch int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2812cecbcddfSRam Amrani {
2813cecbcddfSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
2814cecbcddfSRam Amrani struct qedr_dev *dev = qp->dev;
2815cecbcddfSRam Amrani struct ib_qp_attr attr;
2816cecbcddfSRam Amrani int attr_mask = 0;
2817cecbcddfSRam Amrani
2818cecbcddfSRam Amrani DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2819cecbcddfSRam Amrani qp, qp->qp_type);
2820cecbcddfSRam Amrani
2821f5b1b177SKalderon, Michal if (rdma_protocol_roce(&dev->ibdev, 1)) {
2822b4c2cc48SAmrani, Ram if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2823b4c2cc48SAmrani, Ram (qp->state != QED_ROCE_QP_STATE_ERR) &&
2824b4c2cc48SAmrani, Ram (qp->state != QED_ROCE_QP_STATE_INIT)) {
2825b4c2cc48SAmrani, Ram
2826cecbcddfSRam Amrani attr.qp_state = IB_QPS_ERR;
2827cecbcddfSRam Amrani attr_mask |= IB_QP_STATE;
2828cecbcddfSRam Amrani
2829cecbcddfSRam Amrani /* Change the QP state to ERROR */
2830cecbcddfSRam Amrani qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2831cecbcddfSRam Amrani }
2832e411e058SKalderon, Michal } else {
283382af6d19SMichal Kalderon 		/* If connection establishment started, the WAIT_FOR_CONNECT
283482af6d19SMichal Kalderon 		 * bit will be on and we need to wait for the establishment
283582af6d19SMichal Kalderon * to complete before destroying the qp.
283682af6d19SMichal Kalderon */
283782af6d19SMichal Kalderon if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
283882af6d19SMichal Kalderon &qp->iwarp_cm_flags))
283982af6d19SMichal Kalderon wait_for_completion(&qp->iwarp_cm_comp);
2840e411e058SKalderon, Michal
284182af6d19SMichal Kalderon /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
284282af6d19SMichal Kalderon * bit will be on, and we need to wait for the disconnect to
284382af6d19SMichal Kalderon * complete before continuing. We can use the same completion,
284482af6d19SMichal Kalderon * iwarp_cm_comp, since this is the only place that waits for
284582af6d19SMichal Kalderon * this completion and it is sequential. In addition,
284682af6d19SMichal Kalderon * disconnect can't occur before the connection is fully
284782af6d19SMichal Kalderon * established, therefore if WAIT_FOR_DISCONNECT is on it
284882af6d19SMichal Kalderon * means WAIT_FOR_CONNECT is also on and the completion for
284982af6d19SMichal Kalderon * CONNECT already occurred.
285082af6d19SMichal Kalderon */
285182af6d19SMichal Kalderon if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
285282af6d19SMichal Kalderon &qp->iwarp_cm_flags))
285382af6d19SMichal Kalderon wait_for_completion(&qp->iwarp_cm_comp);
2854f5b1b177SKalderon, Michal }
2855cecbcddfSRam Amrani
2856df158561SAmrani, Ram if (qp->qp_type == IB_QPT_GSI)
285704886779SRam Amrani qedr_destroy_gsi_qp(dev);
2858cecbcddfSRam Amrani
285982af6d19SMichal Kalderon /* We need to remove the entry from the xarray before we release the
286082af6d19SMichal Kalderon 	 * qp_id, to avoid a race where the qp_id is reallocated and the
286182af6d19SMichal Kalderon 	 * subsequent xa_insert fails
286282af6d19SMichal Kalderon */
286382af6d19SMichal Kalderon if (rdma_protocol_iwarp(&dev->ibdev, 1))
286482af6d19SMichal Kalderon xa_erase(&dev->qps, qp->qp_id);
286582af6d19SMichal Kalderon
2866bdeacabdSShamir Rabinovitch qedr_free_qp_resources(dev, qp, udata);
2867cecbcddfSRam Amrani
286860fab107SPrabhakar Kushwaha if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
286982af6d19SMichal Kalderon qedr_iw_qp_rem_ref(&qp->ibqp);
287060fab107SPrabhakar Kushwaha wait_for_completion(&qp->qp_rel_comp);
287160fab107SPrabhakar Kushwaha }
287282af6d19SMichal Kalderon
2873cf167e5eSHariprasad Kelam return 0;
2874cecbcddfSRam Amrani }
2875e0290cceSRam Amrani
2876fa5d010cSMaor Gottlieb int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877d3456914SLeon Romanovsky struct ib_udata *udata)
287804886779SRam Amrani {
2879d3456914SLeon Romanovsky struct qedr_ah *ah = get_qedr_ah(ibah);
288004886779SRam Amrani
2881fa5d010cSMaor Gottlieb rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
288204886779SRam Amrani
2883d3456914SLeon Romanovsky return 0;
288404886779SRam Amrani }
288504886779SRam Amrani
28869a9ebf8cSLeon Romanovsky int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
288704886779SRam Amrani {
288804886779SRam Amrani struct qedr_ah *ah = get_qedr_ah(ibah);
288904886779SRam Amrani
2890d97099feSJason Gunthorpe rdma_destroy_ah_attr(&ah->attr);
28919a9ebf8cSLeon Romanovsky return 0;
289204886779SRam Amrani }
289304886779SRam Amrani
2894e0290cceSRam Amrani static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2895e0290cceSRam Amrani {
2896e0290cceSRam Amrani struct qedr_pbl *pbl, *tmp;
2897e0290cceSRam Amrani
2898e0290cceSRam Amrani if (info->pbl_table)
2899e0290cceSRam Amrani list_add_tail(&info->pbl_table->list_entry,
2900e0290cceSRam Amrani &info->free_pbl_list);
2901e0290cceSRam Amrani
2902e0290cceSRam Amrani if (!list_empty(&info->inuse_pbl_list))
2903e0290cceSRam Amrani list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2904e0290cceSRam Amrani
2905e0290cceSRam Amrani list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2906e0290cceSRam Amrani list_del(&pbl->list_entry);
2907e0290cceSRam Amrani qedr_free_pbl(dev, &info->pbl_info, pbl);
2908e0290cceSRam Amrani }
2909e0290cceSRam Amrani }
2910e0290cceSRam Amrani
2911e0290cceSRam Amrani static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2912e0290cceSRam Amrani size_t page_list_len, bool two_layered)
2913e0290cceSRam Amrani {
2914e0290cceSRam Amrani struct qedr_pbl *tmp;
2915e0290cceSRam Amrani int rc;
2916e0290cceSRam Amrani
2917e0290cceSRam Amrani INIT_LIST_HEAD(&info->free_pbl_list);
2918e0290cceSRam Amrani INIT_LIST_HEAD(&info->inuse_pbl_list);
2919e0290cceSRam Amrani
2920e0290cceSRam Amrani rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2921e0290cceSRam Amrani page_list_len, two_layered);
2922e0290cceSRam Amrani if (rc)
2923e0290cceSRam Amrani goto done;
2924e0290cceSRam Amrani
2925e0290cceSRam Amrani info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
29264cd33aafSChristophe Jaillet if (IS_ERR(info->pbl_table)) {
29274cd33aafSChristophe Jaillet rc = PTR_ERR(info->pbl_table);
2928e0290cceSRam Amrani goto done;
2929e0290cceSRam Amrani }
2930e0290cceSRam Amrani
2931e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2932e0290cceSRam Amrani &info->pbl_table->pa);
2933e0290cceSRam Amrani
2934e0290cceSRam Amrani 	/* In the usual case we use 2 PBLs, so we add one to the free
2935e0290cceSRam Amrani 	 * list and allocate another one
2936e0290cceSRam Amrani */
2937e0290cceSRam Amrani tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
29384cd33aafSChristophe Jaillet if (IS_ERR(tmp)) {
2939e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2940e0290cceSRam Amrani goto done;
2941e0290cceSRam Amrani }
2942e0290cceSRam Amrani
2943e0290cceSRam Amrani list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2944e0290cceSRam Amrani
2945e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2946e0290cceSRam Amrani
2947e0290cceSRam Amrani done:
2948e0290cceSRam Amrani if (rc)
2949e0290cceSRam Amrani free_mr_info(dev, info);
2950e0290cceSRam Amrani
2951e0290cceSRam Amrani return rc;
2952e0290cceSRam Amrani }
2953e0290cceSRam Amrani
2954e0290cceSRam Amrani struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2955e0290cceSRam Amrani u64 usr_addr, int acc, struct ib_udata *udata)
2956e0290cceSRam Amrani {
2957e0290cceSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2958e0290cceSRam Amrani struct qedr_mr *mr;
2959e0290cceSRam Amrani struct qedr_pd *pd;
2960e0290cceSRam Amrani int rc = -ENOMEM;
2961e0290cceSRam Amrani
2962e0290cceSRam Amrani pd = get_qedr_pd(ibpd);
2963e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR,
2964e0290cceSRam Amrani "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2965e0290cceSRam Amrani pd->pd_id, start, len, usr_addr, acc);
2966e0290cceSRam Amrani
2967e0290cceSRam Amrani if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2968e0290cceSRam Amrani return ERR_PTR(-EINVAL);
2969e0290cceSRam Amrani
2970e0290cceSRam Amrani mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2971e0290cceSRam Amrani if (!mr)
2972e0290cceSRam Amrani return ERR_PTR(rc);
2973e0290cceSRam Amrani
2974e0290cceSRam Amrani mr->type = QEDR_MR_USER;
2975e0290cceSRam Amrani
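	/* Pin and DMA-map the user buffer; the resulting umem supplies the
	 * DMA addresses that populate the PBL entries below.
	 */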
2976c320e527SMoni Shoua mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2977e0290cceSRam Amrani if (IS_ERR(mr->umem)) {
2978e0290cceSRam Amrani rc = -EFAULT;
2979e0290cceSRam Amrani goto err0;
2980e0290cceSRam Amrani }
2981e0290cceSRam Amrani
2982901bca71SJason Gunthorpe rc = init_mr_info(dev, &mr->info,
2983901bca71SJason Gunthorpe ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2984e0290cceSRam Amrani if (rc)
2985e0290cceSRam Amrani goto err1;
2986e0290cceSRam Amrani
2987e0290cceSRam Amrani qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
298895ad233fSShiraz, Saleem &mr->info.pbl_info, PAGE_SHIFT);
2989e0290cceSRam Amrani
2990e0290cceSRam Amrani rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2991e0290cceSRam Amrani if (rc) {
29920050a576SPrabhakar Kushwaha if (rc == -EINVAL)
29930050a576SPrabhakar Kushwaha DP_ERR(dev, "Out of MR resources\n");
29940050a576SPrabhakar Kushwaha else
29950050a576SPrabhakar Kushwaha DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
29960050a576SPrabhakar Kushwaha
2997e0290cceSRam Amrani goto err1;
2998e0290cceSRam Amrani }
2999e0290cceSRam Amrani
3000e0290cceSRam Amrani /* Index only, 18 bit long, lkey = itid << 8 | key */
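	/* e.g. a hypothetical itid of 0x12 with key 0 yields lkey 0x1200 */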
3001e0290cceSRam Amrani mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3002e0290cceSRam Amrani mr->hw_mr.key = 0;
3003e0290cceSRam Amrani mr->hw_mr.pd = pd->pd_id;
3004e0290cceSRam Amrani mr->hw_mr.local_read = 1;
3005e0290cceSRam Amrani mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3006e0290cceSRam Amrani mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3007e0290cceSRam Amrani mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008e0290cceSRam Amrani mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3009e0290cceSRam Amrani mr->hw_mr.mw_bind = false;
3010e0290cceSRam Amrani mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3011e0290cceSRam Amrani mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3012e0290cceSRam Amrani mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
301395ad233fSShiraz, Saleem mr->hw_mr.page_size_log = PAGE_SHIFT;
3014e0290cceSRam Amrani mr->hw_mr.length = len;
3015e0290cceSRam Amrani mr->hw_mr.vaddr = usr_addr;
3016e0290cceSRam Amrani mr->hw_mr.phy_mr = false;
3017e0290cceSRam Amrani mr->hw_mr.dma_mr = false;
3018e0290cceSRam Amrani
3019e0290cceSRam Amrani rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3020e0290cceSRam Amrani if (rc) {
3021e0290cceSRam Amrani DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3022e0290cceSRam Amrani goto err2;
3023e0290cceSRam Amrani }
3024e0290cceSRam Amrani
3025e0290cceSRam Amrani mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3026e0290cceSRam Amrani if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3027e0290cceSRam Amrani mr->hw_mr.remote_atomic)
3028e0290cceSRam Amrani mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3029e0290cceSRam Amrani
3030e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3031e0290cceSRam Amrani mr->ibmr.lkey);
3032e0290cceSRam Amrani return &mr->ibmr;
3033e0290cceSRam Amrani
3034e0290cceSRam Amrani err2:
3035e0290cceSRam Amrani dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3036e0290cceSRam Amrani err1:
3037e0290cceSRam Amrani qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3038e0290cceSRam Amrani err0:
3039e0290cceSRam Amrani kfree(mr);
3040e0290cceSRam Amrani return ERR_PTR(rc);
3041e0290cceSRam Amrani }
3042e0290cceSRam Amrani
3043c4367a26SShamir Rabinovitch int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3044e0290cceSRam Amrani {
3045e0290cceSRam Amrani struct qedr_mr *mr = get_qedr_mr(ib_mr);
3046e0290cceSRam Amrani struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3047e0290cceSRam Amrani int rc = 0;
3048e0290cceSRam Amrani
3049e0290cceSRam Amrani rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3050e0290cceSRam Amrani if (rc)
3051e0290cceSRam Amrani return rc;
3052e0290cceSRam Amrani
3053e0290cceSRam Amrani dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3054e0290cceSRam Amrani
305524e412c1SMichal Kalderon if (mr->type != QEDR_MR_DMA)
305624e412c1SMichal Kalderon free_mr_info(dev, &mr->info);
3057e0290cceSRam Amrani
3058e0290cceSRam Amrani /* it could be user registered memory. */
3059e0290cceSRam Amrani ib_umem_release(mr->umem);
3060e0290cceSRam Amrani
3061e0290cceSRam Amrani kfree(mr);
3062e0290cceSRam Amrani
3063e0290cceSRam Amrani return rc;
3064e0290cceSRam Amrani }
3065e0290cceSRam Amrani
306627a4b1a6SRam Amrani static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
306727a4b1a6SRam Amrani int max_page_list_len)
3068e0290cceSRam Amrani {
3069e0290cceSRam Amrani struct qedr_pd *pd = get_qedr_pd(ibpd);
3070e0290cceSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3071e0290cceSRam Amrani struct qedr_mr *mr;
3072e0290cceSRam Amrani int rc = -ENOMEM;
3073e0290cceSRam Amrani
3074e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR,
3075e0290cceSRam Amrani "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3076e0290cceSRam Amrani max_page_list_len);
3077e0290cceSRam Amrani
3078e0290cceSRam Amrani mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3079e0290cceSRam Amrani if (!mr)
3080e0290cceSRam Amrani return ERR_PTR(rc);
3081e0290cceSRam Amrani
3082e0290cceSRam Amrani mr->dev = dev;
3083e0290cceSRam Amrani mr->type = QEDR_MR_FRMR;
3084e0290cceSRam Amrani
3085e0290cceSRam Amrani rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3086e0290cceSRam Amrani if (rc)
3087e0290cceSRam Amrani goto err0;
3088e0290cceSRam Amrani
3089e0290cceSRam Amrani rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3090e0290cceSRam Amrani if (rc) {
30910050a576SPrabhakar Kushwaha if (rc == -EINVAL)
30920050a576SPrabhakar Kushwaha DP_ERR(dev, "Out of MR resources\n");
30930050a576SPrabhakar Kushwaha else
30940050a576SPrabhakar Kushwaha DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
30950050a576SPrabhakar Kushwaha
3096b3236a64SJianglei Nie goto err1;
3097e0290cceSRam Amrani }
3098e0290cceSRam Amrani
3099e0290cceSRam Amrani /* Index only, 18 bit long, lkey = itid << 8 | key */
3100e0290cceSRam Amrani mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3101e0290cceSRam Amrani mr->hw_mr.key = 0;
3102e0290cceSRam Amrani mr->hw_mr.pd = pd->pd_id;
3103e0290cceSRam Amrani mr->hw_mr.local_read = 1;
3104e0290cceSRam Amrani mr->hw_mr.local_write = 0;
3105e0290cceSRam Amrani mr->hw_mr.remote_read = 0;
3106e0290cceSRam Amrani mr->hw_mr.remote_write = 0;
3107e0290cceSRam Amrani mr->hw_mr.remote_atomic = 0;
3108e0290cceSRam Amrani mr->hw_mr.mw_bind = false;
3109e0290cceSRam Amrani mr->hw_mr.pbl_ptr = 0;
3110e0290cceSRam Amrani mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3111e0290cceSRam Amrani mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3112e0290cceSRam Amrani mr->hw_mr.length = 0;
3113e0290cceSRam Amrani mr->hw_mr.vaddr = 0;
3114e0290cceSRam Amrani mr->hw_mr.phy_mr = true;
3115e0290cceSRam Amrani mr->hw_mr.dma_mr = false;
3116e0290cceSRam Amrani
3117e0290cceSRam Amrani rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3118e0290cceSRam Amrani if (rc) {
3119e0290cceSRam Amrani DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3120b3236a64SJianglei Nie goto err2;
3121e0290cceSRam Amrani }
3122e0290cceSRam Amrani
3123e0290cceSRam Amrani mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3124e0290cceSRam Amrani mr->ibmr.rkey = mr->ibmr.lkey;
3125e0290cceSRam Amrani
3126e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3127e0290cceSRam Amrani return mr;
3128e0290cceSRam Amrani
3129b3236a64SJianglei Nie err2:
3130e0290cceSRam Amrani dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3131b3236a64SJianglei Nie err1:
3132b3236a64SJianglei Nie qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3133e0290cceSRam Amrani err0:
3134e0290cceSRam Amrani kfree(mr);
3135e0290cceSRam Amrani return ERR_PTR(rc);
3136e0290cceSRam Amrani }
3137e0290cceSRam Amrani
3138c4367a26SShamir Rabinovitch struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
313942a3b153SGal Pressman u32 max_num_sg)
3140e0290cceSRam Amrani {
3141e0290cceSRam Amrani struct qedr_mr *mr;
3142e0290cceSRam Amrani
3143e0290cceSRam Amrani if (mr_type != IB_MR_TYPE_MEM_REG)
3144e0290cceSRam Amrani return ERR_PTR(-EINVAL);
3145e0290cceSRam Amrani
3146e0290cceSRam Amrani mr = __qedr_alloc_mr(ibpd, max_num_sg);
3147e0290cceSRam Amrani
3148e0290cceSRam Amrani if (IS_ERR(mr))
3149e0290cceSRam Amrani return ERR_PTR(-EINVAL);
3150e0290cceSRam Amrani
3151e0290cceSRam Amrani return &mr->ibmr;
3152e0290cceSRam Amrani }
3153e0290cceSRam Amrani
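/* Callback used by ib_sg_to_pages(): stores one page address in the next free
 * PBL entry. Each PBL page holds pbl_size / sizeof(u64) entries, so the target
 * page is npages / pbes_in_page and the slot within it is
 * npages % pbes_in_page.
 */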
3154e0290cceSRam Amrani static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3155e0290cceSRam Amrani {
3156e0290cceSRam Amrani struct qedr_mr *mr = get_qedr_mr(ibmr);
3157e0290cceSRam Amrani struct qedr_pbl *pbl_table;
3158e0290cceSRam Amrani struct regpair *pbe;
3159e0290cceSRam Amrani u32 pbes_in_page;
3160e0290cceSRam Amrani
3161e0290cceSRam Amrani if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3162ffab8c89SColin Ian King 		DP_ERR(mr->dev, "qedr_set_page failed, npages=%d\n", mr->npages);
3163e0290cceSRam Amrani return -ENOMEM;
3164e0290cceSRam Amrani }
3165e0290cceSRam Amrani
3166e0290cceSRam Amrani DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3167e0290cceSRam Amrani mr->npages, addr);
3168e0290cceSRam Amrani
3169e0290cceSRam Amrani pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3170e0290cceSRam Amrani pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3171e0290cceSRam Amrani pbe = (struct regpair *)pbl_table->va;
3172e0290cceSRam Amrani pbe += mr->npages % pbes_in_page;
3173e0290cceSRam Amrani pbe->lo = cpu_to_le32((u32)addr);
3174e0290cceSRam Amrani pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3175e0290cceSRam Amrani
3176e0290cceSRam Amrani mr->npages++;
3177e0290cceSRam Amrani
3178e0290cceSRam Amrani return 0;
3179e0290cceSRam Amrani }
3180e0290cceSRam Amrani
3181e0290cceSRam Amrani static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3182e0290cceSRam Amrani {
3183e0290cceSRam Amrani int work = info->completed - info->completed_handled - 1;
3184e0290cceSRam Amrani
3185e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3186e0290cceSRam Amrani while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3187e0290cceSRam Amrani struct qedr_pbl *pbl;
3188e0290cceSRam Amrani
3189e0290cceSRam Amrani 		/* Free all the page lists that can be freed (all the ones
3190e0290cceSRam Amrani 		 * that were invalidated), under the assumption that if an FMR
3191e0290cceSRam Amrani 		 * completed successfully, then any invalidate operation posted
3192e0290cceSRam Amrani 		 * before it has completed as well.
3193e0290cceSRam Amrani 		 */
3194e0290cceSRam Amrani pbl = list_first_entry(&info->inuse_pbl_list,
3195e0290cceSRam Amrani struct qedr_pbl, list_entry);
3196aafec388SWei Yongjun list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3197e0290cceSRam Amrani info->completed_handled++;
3198e0290cceSRam Amrani }
3199e0290cceSRam Amrani }
3200e0290cceSRam Amrani
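/* Fill the MR's PBL from a scatterlist: ib_sg_to_pages() walks the SG list and
 * invokes qedr_set_page() once per page-sized block.
 */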
3201e0290cceSRam Amrani int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3202e0290cceSRam Amrani int sg_nents, unsigned int *sg_offset)
3203e0290cceSRam Amrani {
3204e0290cceSRam Amrani struct qedr_mr *mr = get_qedr_mr(ibmr);
3205e0290cceSRam Amrani
3206e0290cceSRam Amrani mr->npages = 0;
3207e0290cceSRam Amrani
3208e0290cceSRam Amrani handle_completed_mrs(mr->dev, &mr->info);
3209e0290cceSRam Amrani return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3210e0290cceSRam Amrani }
3211e0290cceSRam Amrani
3212e0290cceSRam Amrani struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3213e0290cceSRam Amrani {
3214e0290cceSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3215e0290cceSRam Amrani struct qedr_pd *pd = get_qedr_pd(ibpd);
3216e0290cceSRam Amrani struct qedr_mr *mr;
3217e0290cceSRam Amrani int rc;
3218e0290cceSRam Amrani
3219e0290cceSRam Amrani mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3220e0290cceSRam Amrani if (!mr)
3221e0290cceSRam Amrani return ERR_PTR(-ENOMEM);
3222e0290cceSRam Amrani
3223e0290cceSRam Amrani mr->type = QEDR_MR_DMA;
3224e0290cceSRam Amrani
3225e0290cceSRam Amrani rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3226e0290cceSRam Amrani if (rc) {
32270050a576SPrabhakar Kushwaha if (rc == -EINVAL)
32280050a576SPrabhakar Kushwaha DP_ERR(dev, "Out of MR resources\n");
32290050a576SPrabhakar Kushwaha else
32300050a576SPrabhakar Kushwaha DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
32310050a576SPrabhakar Kushwaha
3232e0290cceSRam Amrani goto err1;
3233e0290cceSRam Amrani }
3234e0290cceSRam Amrani
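	/* No PBL is built for a DMA MR; with dma_mr set, the key is expected
	 * to cover the device's entire DMA address space.
	 */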
3235e0290cceSRam Amrani /* index only, 18 bit long, lkey = itid << 8 | key */
3236e0290cceSRam Amrani mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3237e0290cceSRam Amrani mr->hw_mr.pd = pd->pd_id;
3238e0290cceSRam Amrani mr->hw_mr.local_read = 1;
3239e0290cceSRam Amrani mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3240e0290cceSRam Amrani mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3241e0290cceSRam Amrani mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3242e0290cceSRam Amrani mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3243e0290cceSRam Amrani mr->hw_mr.dma_mr = true;
3244e0290cceSRam Amrani
3245e0290cceSRam Amrani rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3246e0290cceSRam Amrani if (rc) {
3247e0290cceSRam Amrani DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3248e0290cceSRam Amrani goto err2;
3249e0290cceSRam Amrani }
3250e0290cceSRam Amrani
3251e0290cceSRam Amrani mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3252e0290cceSRam Amrani if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3253e0290cceSRam Amrani mr->hw_mr.remote_atomic)
3254e0290cceSRam Amrani mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3255e0290cceSRam Amrani
3256e0290cceSRam Amrani DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3257e0290cceSRam Amrani return &mr->ibmr;
3258e0290cceSRam Amrani
3259e0290cceSRam Amrani err2:
3260e0290cceSRam Amrani dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3261e0290cceSRam Amrani err1:
3262e0290cceSRam Amrani kfree(mr);
3263e0290cceSRam Amrani return ERR_PTR(rc);
3264e0290cceSRam Amrani }
3265afa0e13bSRam Amrani
3266afa0e13bSRam Amrani static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3267afa0e13bSRam Amrani {
3268afa0e13bSRam Amrani return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3269afa0e13bSRam Amrani }
3270afa0e13bSRam Amrani
3271afa0e13bSRam Amrani static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3272afa0e13bSRam Amrani {
3273afa0e13bSRam Amrani int i, len = 0;
3274afa0e13bSRam Amrani
3275afa0e13bSRam Amrani for (i = 0; i < num_sge; i++)
3276afa0e13bSRam Amrani len += sg_list[i].length;
3277afa0e13bSRam Amrani
3278afa0e13bSRam Amrani return len;
3279afa0e13bSRam Amrani }
3280afa0e13bSRam Amrani
3281afa0e13bSRam Amrani static void swap_wqe_data64(u64 *p)
3282afa0e13bSRam Amrani {
3283afa0e13bSRam Amrani int i;
3284afa0e13bSRam Amrani
3285afa0e13bSRam Amrani for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3286afa0e13bSRam Amrani *p = cpu_to_be64(cpu_to_le64(*p));
3287afa0e13bSRam Amrani }
3288afa0e13bSRam Amrani
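/* Copy the payload of an IB_SEND_INLINE work request directly into SQ WQE
 * segments. Each produced segment holds sizeof(struct rdma_sq_common_wqe)
 * bytes and segments are byte-swapped to wire format as they are filled.
 * Returns the total inline data size, or 0 (with *bad_wr set) when the
 * data exceeds ROCE_REQ_MAX_INLINE_DATA_SIZE.
 */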
3289afa0e13bSRam Amrani static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3290afa0e13bSRam Amrani struct qedr_qp *qp, u8 *wqe_size,
3291d34ac5cdSBart Van Assche const struct ib_send_wr *wr,
3292d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr,
3293d34ac5cdSBart Van Assche u8 *bits, u8 bit)
3294afa0e13bSRam Amrani {
3295afa0e13bSRam Amrani u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3296afa0e13bSRam Amrani char *seg_prt, *wqe;
3297afa0e13bSRam Amrani int i, seg_siz;
3298afa0e13bSRam Amrani
3299afa0e13bSRam Amrani if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3300afa0e13bSRam Amrani DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3301afa0e13bSRam Amrani *bad_wr = wr;
3302afa0e13bSRam Amrani return 0;
3303afa0e13bSRam Amrani }
3304afa0e13bSRam Amrani
3305afa0e13bSRam Amrani if (!data_size)
3306afa0e13bSRam Amrani return data_size;
3307afa0e13bSRam Amrani
3308afa0e13bSRam Amrani *bits |= bit;
3309afa0e13bSRam Amrani
3310afa0e13bSRam Amrani seg_prt = NULL;
3311afa0e13bSRam Amrani wqe = NULL;
3312afa0e13bSRam Amrani seg_siz = 0;
3313afa0e13bSRam Amrani
3314afa0e13bSRam Amrani /* Copy data inline */
3315afa0e13bSRam Amrani for (i = 0; i < wr->num_sge; i++) {
3316afa0e13bSRam Amrani u32 len = wr->sg_list[i].length;
3317afa0e13bSRam Amrani void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3318afa0e13bSRam Amrani
3319afa0e13bSRam Amrani while (len > 0) {
3320afa0e13bSRam Amrani u32 cur;
3321afa0e13bSRam Amrani
3322afa0e13bSRam Amrani /* New segment required */
3323afa0e13bSRam Amrani if (!seg_siz) {
3324afa0e13bSRam Amrani wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3325afa0e13bSRam Amrani seg_prt = wqe;
3326afa0e13bSRam Amrani seg_siz = sizeof(struct rdma_sq_common_wqe);
3327afa0e13bSRam Amrani (*wqe_size)++;
3328afa0e13bSRam Amrani }
3329afa0e13bSRam Amrani
3330afa0e13bSRam Amrani /* Calculate currently allowed length */
3331afa0e13bSRam Amrani cur = min_t(u32, len, seg_siz);
3332afa0e13bSRam Amrani memcpy(seg_prt, src, cur);
3333afa0e13bSRam Amrani
3334afa0e13bSRam Amrani /* Update segment variables */
3335afa0e13bSRam Amrani seg_prt += cur;
3336afa0e13bSRam Amrani seg_siz -= cur;
3337afa0e13bSRam Amrani
3338afa0e13bSRam Amrani /* Update sge variables */
3339afa0e13bSRam Amrani src += cur;
3340afa0e13bSRam Amrani len -= cur;
3341afa0e13bSRam Amrani
3342afa0e13bSRam Amrani /* Swap fully-completed segments */
3343afa0e13bSRam Amrani if (!seg_siz)
3344afa0e13bSRam Amrani swap_wqe_data64((u64 *)wqe);
3345afa0e13bSRam Amrani }
3346afa0e13bSRam Amrani }
3347afa0e13bSRam Amrani
3348afa0e13bSRam Amrani /* swap the last, partially filled segment */
3349afa0e13bSRam Amrani if (seg_siz)
3350afa0e13bSRam Amrani swap_wqe_data64((u64 *)wqe);
3351afa0e13bSRam Amrani
3352afa0e13bSRam Amrani return data_size;
3353afa0e13bSRam Amrani }
3354afa0e13bSRam Amrani
3355afa0e13bSRam Amrani #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3356afa0e13bSRam Amrani do { \
3357afa0e13bSRam Amrani DMA_REGPAIR_LE(sge->addr, vaddr); \
3358afa0e13bSRam Amrani (sge)->length = cpu_to_le32(vlength); \
3359afa0e13bSRam Amrani (sge)->flags = cpu_to_le32(vflags); \
3360afa0e13bSRam Amrani } while (0)
3361afa0e13bSRam Amrani
3362afa0e13bSRam Amrani #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3363afa0e13bSRam Amrani do { \
3364afa0e13bSRam Amrani DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3365afa0e13bSRam Amrani (hdr)->num_sges = num_sge; \
3366afa0e13bSRam Amrani } while (0)
3367afa0e13bSRam Amrani
3368afa0e13bSRam Amrani #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3369afa0e13bSRam Amrani do { \
3370afa0e13bSRam Amrani DMA_REGPAIR_LE(sge->addr, vaddr); \
3371afa0e13bSRam Amrani (sge)->length = cpu_to_le32(vlength); \
3372afa0e13bSRam Amrani (sge)->l_key = cpu_to_le32(vlkey); \
3373afa0e13bSRam Amrani } while (0)
3374afa0e13bSRam Amrani
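/* Produce one rdma_sq_sge element per SGE of the work request and return
 * the total payload length. When wqe_size is provided it is incremented by
 * the number of SGEs consumed.
 */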
3375afa0e13bSRam Amrani static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3376f696bf6dSBart Van Assche const struct ib_send_wr *wr)
3377afa0e13bSRam Amrani {
3378afa0e13bSRam Amrani u32 data_size = 0;
3379afa0e13bSRam Amrani int i;
3380afa0e13bSRam Amrani
3381afa0e13bSRam Amrani for (i = 0; i < wr->num_sge; i++) {
3382afa0e13bSRam Amrani struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3383afa0e13bSRam Amrani
3384afa0e13bSRam Amrani DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3385afa0e13bSRam Amrani sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3386afa0e13bSRam Amrani sge->length = cpu_to_le32(wr->sg_list[i].length);
3387afa0e13bSRam Amrani data_size += wr->sg_list[i].length;
3388afa0e13bSRam Amrani }
3389afa0e13bSRam Amrani
3390afa0e13bSRam Amrani if (wqe_size)
3391afa0e13bSRam Amrani *wqe_size += wr->num_sge;
3392afa0e13bSRam Amrani
3393afa0e13bSRam Amrani return data_size;
3394afa0e13bSRam Amrani }
3395afa0e13bSRam Amrani
3396afa0e13bSRam Amrani static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3397afa0e13bSRam Amrani struct qedr_qp *qp,
3398afa0e13bSRam Amrani struct rdma_sq_rdma_wqe_1st *rwqe,
3399afa0e13bSRam Amrani struct rdma_sq_rdma_wqe_2nd *rwqe2,
3400d34ac5cdSBart Van Assche const struct ib_send_wr *wr,
3401d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr)
3402afa0e13bSRam Amrani {
3403afa0e13bSRam Amrani rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3404afa0e13bSRam Amrani DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3405afa0e13bSRam Amrani
34068b0cabc6SAmrani, Ram if (wr->send_flags & IB_SEND_INLINE &&
34078b0cabc6SAmrani, Ram (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
34088b0cabc6SAmrani, Ram wr->opcode == IB_WR_RDMA_WRITE)) {
3409afa0e13bSRam Amrani u8 flags = 0;
3410afa0e13bSRam Amrani
3411afa0e13bSRam Amrani SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3412afa0e13bSRam Amrani return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3413afa0e13bSRam Amrani bad_wr, &rwqe->flags, flags);
3414afa0e13bSRam Amrani }
3415afa0e13bSRam Amrani
3416afa0e13bSRam Amrani return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3417afa0e13bSRam Amrani }
3418afa0e13bSRam Amrani
3419afa0e13bSRam Amrani static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3420afa0e13bSRam Amrani struct qedr_qp *qp,
3421afa0e13bSRam Amrani struct rdma_sq_send_wqe_1st *swqe,
3422afa0e13bSRam Amrani struct rdma_sq_send_wqe_2st *swqe2,
3423d34ac5cdSBart Van Assche const struct ib_send_wr *wr,
3424d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr)
3425afa0e13bSRam Amrani {
3426afa0e13bSRam Amrani memset(swqe2, 0, sizeof(*swqe2));
3427afa0e13bSRam Amrani if (wr->send_flags & IB_SEND_INLINE) {
3428afa0e13bSRam Amrani u8 flags = 0;
3429afa0e13bSRam Amrani
3430afa0e13bSRam Amrani SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3431afa0e13bSRam Amrani return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3432afa0e13bSRam Amrani bad_wr, &swqe->flags, flags);
3433afa0e13bSRam Amrani }
3434afa0e13bSRam Amrani
3435afa0e13bSRam Amrani return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3436afa0e13bSRam Amrani }
3437afa0e13bSRam Amrani
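/* Build the two-part fast-MR (IB_WR_REG_MR) WQE: the first part carries the
 * iova and key, the second carries the access flags, log page size, length
 * and the PBL address of the MR being registered.
 */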
3438afa0e13bSRam Amrani static int qedr_prepare_reg(struct qedr_qp *qp,
3439afa0e13bSRam Amrani struct rdma_sq_fmr_wqe_1st *fwqe1,
3440f696bf6dSBart Van Assche const struct ib_reg_wr *wr)
3441afa0e13bSRam Amrani {
3442afa0e13bSRam Amrani struct qedr_mr *mr = get_qedr_mr(wr->mr);
3443afa0e13bSRam Amrani struct rdma_sq_fmr_wqe_2nd *fwqe2;
3444afa0e13bSRam Amrani
3445afa0e13bSRam Amrani fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3446afa0e13bSRam Amrani fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3447afa0e13bSRam Amrani fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3448afa0e13bSRam Amrani fwqe1->l_key = wr->key;
3449afa0e13bSRam Amrani
345008c4cf51SAmrani, Ram fwqe2->access_ctrl = 0;
345108c4cf51SAmrani, Ram
3452afa0e13bSRam Amrani SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3453afa0e13bSRam Amrani !!(wr->access & IB_ACCESS_REMOTE_READ));
3454afa0e13bSRam Amrani SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3455afa0e13bSRam Amrani !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3456afa0e13bSRam Amrani SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3457afa0e13bSRam Amrani !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3458afa0e13bSRam Amrani SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3459afa0e13bSRam Amrani SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3460afa0e13bSRam Amrani !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3461afa0e13bSRam Amrani fwqe2->fmr_ctrl = 0;
3462afa0e13bSRam Amrani
3463afa0e13bSRam Amrani SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3464afa0e13bSRam Amrani ilog2(mr->ibmr.page_size) - 12);
3465afa0e13bSRam Amrani
3466afa0e13bSRam Amrani fwqe2->length_hi = 0;
3467afa0e13bSRam Amrani fwqe2->length_lo = mr->ibmr.length;
3468afa0e13bSRam Amrani fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3469afa0e13bSRam Amrani fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3470afa0e13bSRam Amrani
3471afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].mr = mr;
3472afa0e13bSRam Amrani
3473afa0e13bSRam Amrani return 0;
3474afa0e13bSRam Amrani }
3475afa0e13bSRam Amrani
347627a4b1a6SRam Amrani static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3477afa0e13bSRam Amrani {
3478afa0e13bSRam Amrani switch (opcode) {
3479afa0e13bSRam Amrani case IB_WR_RDMA_WRITE:
3480afa0e13bSRam Amrani case IB_WR_RDMA_WRITE_WITH_IMM:
3481afa0e13bSRam Amrani return IB_WC_RDMA_WRITE;
3482afa0e13bSRam Amrani case IB_WR_SEND_WITH_IMM:
3483afa0e13bSRam Amrani case IB_WR_SEND:
3484afa0e13bSRam Amrani case IB_WR_SEND_WITH_INV:
3485afa0e13bSRam Amrani return IB_WC_SEND;
3486afa0e13bSRam Amrani case IB_WR_RDMA_READ:
3487fb1a22beSKalderon, Michal case IB_WR_RDMA_READ_WITH_INV:
3488afa0e13bSRam Amrani return IB_WC_RDMA_READ;
3489afa0e13bSRam Amrani case IB_WR_ATOMIC_CMP_AND_SWP:
3490afa0e13bSRam Amrani return IB_WC_COMP_SWAP;
3491afa0e13bSRam Amrani case IB_WR_ATOMIC_FETCH_AND_ADD:
3492afa0e13bSRam Amrani return IB_WC_FETCH_ADD;
3493afa0e13bSRam Amrani case IB_WR_REG_MR:
3494afa0e13bSRam Amrani return IB_WC_REG_MR;
3495afa0e13bSRam Amrani case IB_WR_LOCAL_INV:
3496afa0e13bSRam Amrani return IB_WC_LOCAL_INV;
3497afa0e13bSRam Amrani default:
3498afa0e13bSRam Amrani return IB_WC_SEND;
3499afa0e13bSRam Amrani }
3500afa0e13bSRam Amrani }
3501afa0e13bSRam Amrani
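/* Check that the SQ can accept another work request: the WQ must not be
 * full, the WR must not carry more SGEs than the SQ supports and the PBL
 * must have room for a maximal SQE. Each failure reason is reported only
 * once per QP via qp->err_bitmap.
 */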
3502f696bf6dSBart Van Assche static inline bool qedr_can_post_send(struct qedr_qp *qp,
3503f696bf6dSBart Van Assche const struct ib_send_wr *wr)
3504afa0e13bSRam Amrani {
3505afa0e13bSRam Amrani int wq_is_full, err_wr, pbl_is_full;
3506afa0e13bSRam Amrani struct qedr_dev *dev = qp->dev;
3507afa0e13bSRam Amrani
3508afa0e13bSRam Amrani /* prevent SQ overflow and/or processing of a bad WR */
3509afa0e13bSRam Amrani err_wr = wr->num_sge > qp->sq.max_sges;
3510afa0e13bSRam Amrani wq_is_full = qedr_wq_is_full(&qp->sq);
3511afa0e13bSRam Amrani pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3512afa0e13bSRam Amrani QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3513afa0e13bSRam Amrani if (wq_is_full || err_wr || pbl_is_full) {
3514afa0e13bSRam Amrani if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3515afa0e13bSRam Amrani DP_ERR(dev,
3516afa0e13bSRam Amrani "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3517afa0e13bSRam Amrani qp);
3518afa0e13bSRam Amrani qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3519afa0e13bSRam Amrani }
3520afa0e13bSRam Amrani
3521afa0e13bSRam Amrani if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3522afa0e13bSRam Amrani DP_ERR(dev,
3523afa0e13bSRam Amrani "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3524afa0e13bSRam Amrani qp);
3525afa0e13bSRam Amrani qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3526afa0e13bSRam Amrani }
3527afa0e13bSRam Amrani
3528afa0e13bSRam Amrani if (pbl_is_full &&
3529afa0e13bSRam Amrani !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3530afa0e13bSRam Amrani DP_ERR(dev,
3531afa0e13bSRam Amrani "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3532afa0e13bSRam Amrani qp);
3533afa0e13bSRam Amrani qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3534afa0e13bSRam Amrani }
3535afa0e13bSRam Amrani return false;
3536afa0e13bSRam Amrani }
3537afa0e13bSRam Amrani return true;
3538afa0e13bSRam Amrani }
3539afa0e13bSRam Amrani
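/* Build a single SQ WQE for one work request. Immediate-data opcodes are
 * rejected on iWARP. If the WR is rejected, the SQ producer and
 * prev_wqe_size are restored so the chain is left unchanged.
 */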
3540d34ac5cdSBart Van Assche static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3541d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr)
3542afa0e13bSRam Amrani {
3543afa0e13bSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3544afa0e13bSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
3545afa0e13bSRam Amrani struct rdma_sq_atomic_wqe_1st *awqe1;
3546afa0e13bSRam Amrani struct rdma_sq_atomic_wqe_2nd *awqe2;
3547afa0e13bSRam Amrani struct rdma_sq_atomic_wqe_3rd *awqe3;
3548afa0e13bSRam Amrani struct rdma_sq_send_wqe_2st *swqe2;
3549afa0e13bSRam Amrani struct rdma_sq_local_inv_wqe *iwqe;
3550afa0e13bSRam Amrani struct rdma_sq_rdma_wqe_2nd *rwqe2;
3551afa0e13bSRam Amrani struct rdma_sq_send_wqe_1st *swqe;
3552afa0e13bSRam Amrani struct rdma_sq_rdma_wqe_1st *rwqe;
3553afa0e13bSRam Amrani struct rdma_sq_fmr_wqe_1st *fwqe1;
3554afa0e13bSRam Amrani struct rdma_sq_common_wqe *wqe;
3555afa0e13bSRam Amrani u32 length;
3556afa0e13bSRam Amrani int rc = 0;
3557afa0e13bSRam Amrani bool comp;
3558afa0e13bSRam Amrani
3559afa0e13bSRam Amrani if (!qedr_can_post_send(qp, wr)) {
3560afa0e13bSRam Amrani *bad_wr = wr;
3561afa0e13bSRam Amrani return -ENOMEM;
3562afa0e13bSRam Amrani }
3563afa0e13bSRam Amrani
3564afa0e13bSRam Amrani wqe = qed_chain_produce(&qp->sq.pbl);
3565afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].signaled =
3566afa0e13bSRam Amrani !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3567afa0e13bSRam Amrani
3568afa0e13bSRam Amrani wqe->flags = 0;
3569afa0e13bSRam Amrani SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3570afa0e13bSRam Amrani !!(wr->send_flags & IB_SEND_SOLICITED));
3571afa0e13bSRam Amrani comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3572afa0e13bSRam Amrani SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3573afa0e13bSRam Amrani SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3574afa0e13bSRam Amrani !!(wr->send_flags & IB_SEND_FENCE));
3575afa0e13bSRam Amrani wqe->prev_wqe_size = qp->prev_wqe_size;
3576afa0e13bSRam Amrani
3577afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3578afa0e13bSRam Amrani
3579afa0e13bSRam Amrani switch (wr->opcode) {
3580afa0e13bSRam Amrani case IB_WR_SEND_WITH_IMM:
3581551e1c67SKalderon, Michal if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3582551e1c67SKalderon, Michal rc = -EINVAL;
3583551e1c67SKalderon, Michal *bad_wr = wr;
3584551e1c67SKalderon, Michal break;
3585551e1c67SKalderon, Michal }
3586afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3587afa0e13bSRam Amrani swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3588afa0e13bSRam Amrani swqe->wqe_size = 2;
3589afa0e13bSRam Amrani swqe2 = qed_chain_produce(&qp->sq.pbl);
3590afa0e13bSRam Amrani
35917bed7ebcSJason Gunthorpe swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3592afa0e13bSRam Amrani length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3593afa0e13bSRam Amrani wr, bad_wr);
3594afa0e13bSRam Amrani swqe->length = cpu_to_le32(length);
3595afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596afa0e13bSRam Amrani qp->prev_wqe_size = swqe->wqe_size;
3597afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3598afa0e13bSRam Amrani break;
3599afa0e13bSRam Amrani case IB_WR_SEND:
3600afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3601afa0e13bSRam Amrani swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3602afa0e13bSRam Amrani
3603afa0e13bSRam Amrani swqe->wqe_size = 2;
3604afa0e13bSRam Amrani swqe2 = qed_chain_produce(&qp->sq.pbl);
3605afa0e13bSRam Amrani length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3606afa0e13bSRam Amrani wr, bad_wr);
3607afa0e13bSRam Amrani swqe->length = cpu_to_le32(length);
3608afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609afa0e13bSRam Amrani qp->prev_wqe_size = swqe->wqe_size;
3610afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3611afa0e13bSRam Amrani break;
3612afa0e13bSRam Amrani case IB_WR_SEND_WITH_INV:
3613afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3614afa0e13bSRam Amrani swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3615afa0e13bSRam Amrani swqe2 = qed_chain_produce(&qp->sq.pbl);
3616afa0e13bSRam Amrani swqe->wqe_size = 2;
3617afa0e13bSRam Amrani swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3618afa0e13bSRam Amrani length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3619afa0e13bSRam Amrani wr, bad_wr);
3620afa0e13bSRam Amrani swqe->length = cpu_to_le32(length);
3621afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3622afa0e13bSRam Amrani qp->prev_wqe_size = swqe->wqe_size;
3623afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3624afa0e13bSRam Amrani break;
3625afa0e13bSRam Amrani
3626afa0e13bSRam Amrani case IB_WR_RDMA_WRITE_WITH_IMM:
3627551e1c67SKalderon, Michal if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3628551e1c67SKalderon, Michal rc = -EINVAL;
3629551e1c67SKalderon, Michal *bad_wr = wr;
3630551e1c67SKalderon, Michal break;
3631551e1c67SKalderon, Michal }
3632afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3633afa0e13bSRam Amrani rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3634afa0e13bSRam Amrani
3635afa0e13bSRam Amrani rwqe->wqe_size = 2;
3636afa0e13bSRam Amrani rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3637afa0e13bSRam Amrani rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638afa0e13bSRam Amrani length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3639afa0e13bSRam Amrani wr, bad_wr);
3640afa0e13bSRam Amrani rwqe->length = cpu_to_le32(length);
3641afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642afa0e13bSRam Amrani qp->prev_wqe_size = rwqe->wqe_size;
3643afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3644afa0e13bSRam Amrani break;
3645afa0e13bSRam Amrani case IB_WR_RDMA_WRITE:
3646afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3647afa0e13bSRam Amrani rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3648afa0e13bSRam Amrani
3649afa0e13bSRam Amrani rwqe->wqe_size = 2;
3650afa0e13bSRam Amrani rwqe2 = qed_chain_produce(&qp->sq.pbl);
3651afa0e13bSRam Amrani length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3652afa0e13bSRam Amrani wr, bad_wr);
3653afa0e13bSRam Amrani rwqe->length = cpu_to_le32(length);
3654afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3655afa0e13bSRam Amrani qp->prev_wqe_size = rwqe->wqe_size;
3656afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3657afa0e13bSRam Amrani break;
3658afa0e13bSRam Amrani case IB_WR_RDMA_READ_WITH_INV:
3659fb1a22beSKalderon, Michal SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3660df561f66SGustavo A. R. Silva fallthrough; /* handled identically to RDMA READ */
3661afa0e13bSRam Amrani
3662afa0e13bSRam Amrani case IB_WR_RDMA_READ:
3663afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3664afa0e13bSRam Amrani rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3665afa0e13bSRam Amrani
3666afa0e13bSRam Amrani rwqe->wqe_size = 2;
3667afa0e13bSRam Amrani rwqe2 = qed_chain_produce(&qp->sq.pbl);
3668afa0e13bSRam Amrani length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3669afa0e13bSRam Amrani wr, bad_wr);
3670afa0e13bSRam Amrani rwqe->length = cpu_to_le32(length);
3671afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3672afa0e13bSRam Amrani qp->prev_wqe_size = rwqe->wqe_size;
3673afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3674afa0e13bSRam Amrani break;
3675afa0e13bSRam Amrani
3676afa0e13bSRam Amrani case IB_WR_ATOMIC_CMP_AND_SWP:
3677afa0e13bSRam Amrani case IB_WR_ATOMIC_FETCH_AND_ADD:
3678afa0e13bSRam Amrani awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3679afa0e13bSRam Amrani awqe1->wqe_size = 4;
3680afa0e13bSRam Amrani
3681afa0e13bSRam Amrani awqe2 = qed_chain_produce(&qp->sq.pbl);
3682afa0e13bSRam Amrani DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3683afa0e13bSRam Amrani awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3684afa0e13bSRam Amrani
3685afa0e13bSRam Amrani awqe3 = qed_chain_produce(&qp->sq.pbl);
3686afa0e13bSRam Amrani
3687afa0e13bSRam Amrani if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3688afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3689afa0e13bSRam Amrani DMA_REGPAIR_LE(awqe3->swap_data,
3690afa0e13bSRam Amrani atomic_wr(wr)->compare_add);
3691afa0e13bSRam Amrani } else {
3692afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3693afa0e13bSRam Amrani DMA_REGPAIR_LE(awqe3->swap_data,
3694afa0e13bSRam Amrani atomic_wr(wr)->swap);
3695afa0e13bSRam Amrani DMA_REGPAIR_LE(awqe3->cmp_data,
3696afa0e13bSRam Amrani atomic_wr(wr)->compare_add);
3697afa0e13bSRam Amrani }
3698afa0e13bSRam Amrani
3699afa0e13bSRam Amrani qedr_prepare_sq_sges(qp, NULL, wr);
3700afa0e13bSRam Amrani
3701afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3702afa0e13bSRam Amrani qp->prev_wqe_size = awqe1->wqe_size;
3703afa0e13bSRam Amrani break;
3704afa0e13bSRam Amrani
3705afa0e13bSRam Amrani case IB_WR_LOCAL_INV:
3706afa0e13bSRam Amrani iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3707afa0e13bSRam Amrani iwqe->wqe_size = 1;
3708afa0e13bSRam Amrani
3709afa0e13bSRam Amrani iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3710afa0e13bSRam Amrani iwqe->inv_l_key = wr->ex.invalidate_rkey;
3711afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3712afa0e13bSRam Amrani qp->prev_wqe_size = iwqe->wqe_size;
3713afa0e13bSRam Amrani break;
3714afa0e13bSRam Amrani case IB_WR_REG_MR:
3715afa0e13bSRam Amrani DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3716afa0e13bSRam Amrani wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3717afa0e13bSRam Amrani fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3718afa0e13bSRam Amrani fwqe1->wqe_size = 2;
3719afa0e13bSRam Amrani
3720afa0e13bSRam Amrani rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3721afa0e13bSRam Amrani if (rc) {
3722afa0e13bSRam Amrani DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3723afa0e13bSRam Amrani *bad_wr = wr;
3724afa0e13bSRam Amrani break;
3725afa0e13bSRam Amrani }
3726afa0e13bSRam Amrani
3727afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3728afa0e13bSRam Amrani qp->prev_wqe_size = fwqe1->wqe_size;
3729afa0e13bSRam Amrani break;
3730afa0e13bSRam Amrani default:
3731afa0e13bSRam Amrani DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3732afa0e13bSRam Amrani rc = -EINVAL;
3733afa0e13bSRam Amrani *bad_wr = wr;
3734afa0e13bSRam Amrani break;
3735afa0e13bSRam Amrani }
3736afa0e13bSRam Amrani
3737afa0e13bSRam Amrani if (*bad_wr) {
3738afa0e13bSRam Amrani u16 value;
3739afa0e13bSRam Amrani
3740afa0e13bSRam Amrani /* Restore prod to its position before
3741afa0e13bSRam Amrani * this WR was processed
3742afa0e13bSRam Amrani */
3743afa0e13bSRam Amrani value = le16_to_cpu(qp->sq.db_data.data.value);
3744afa0e13bSRam Amrani qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3745afa0e13bSRam Amrani
3746afa0e13bSRam Amrani /* Restore prev_wqe_size */
3747afa0e13bSRam Amrani qp->prev_wqe_size = wqe->prev_wqe_size;
3748afa0e13bSRam Amrani rc = -EINVAL;
3749afa0e13bSRam Amrani DP_ERR(dev, "POST SEND FAILED\n");
3750afa0e13bSRam Amrani }
3751afa0e13bSRam Amrani
3752afa0e13bSRam Amrani return rc;
3753afa0e13bSRam Amrani }
3754afa0e13bSRam Amrani
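/* Verbs entry point for posting a chain of send work requests. GSI QPs are
 * handled by qedr_gsi_post_send(); for RoCE the QP must be in RTS, SQD or
 * ERR state. The SQ doorbell is rung once, after the whole chain has been
 * built, with an smp_wmb() ordering the WQE writes before it.
 */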
3755d34ac5cdSBart Van Assche int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3756d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr)
3757afa0e13bSRam Amrani {
3758afa0e13bSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3759afa0e13bSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
3760afa0e13bSRam Amrani unsigned long flags;
3761afa0e13bSRam Amrani int rc = 0;
3762afa0e13bSRam Amrani
3763afa0e13bSRam Amrani *bad_wr = NULL;
3764afa0e13bSRam Amrani
376504886779SRam Amrani if (qp->qp_type == IB_QPT_GSI)
376604886779SRam Amrani return qedr_gsi_post_send(ibqp, wr, bad_wr);
376704886779SRam Amrani
3768afa0e13bSRam Amrani spin_lock_irqsave(&qp->q_lock, flags);
3769afa0e13bSRam Amrani
3770f5b1b177SKalderon, Michal if (rdma_protocol_roce(&dev->ibdev, 1)) {
3771922d9a40SAmrani, Ram if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3772922d9a40SAmrani, Ram (qp->state != QED_ROCE_QP_STATE_ERR) &&
3773922d9a40SAmrani, Ram (qp->state != QED_ROCE_QP_STATE_SQD)) {
3774afa0e13bSRam Amrani spin_unlock_irqrestore(&qp->q_lock, flags);
3775afa0e13bSRam Amrani *bad_wr = wr;
3776afa0e13bSRam Amrani DP_DEBUG(dev, QEDR_MSG_CQ,
3777afa0e13bSRam Amrani "QP in wrong state! QP icid=0x%x state %d\n",
3778afa0e13bSRam Amrani qp->icid, qp->state);
3779afa0e13bSRam Amrani return -EINVAL;
3780afa0e13bSRam Amrani }
3781f5b1b177SKalderon, Michal }
3782afa0e13bSRam Amrani
3783afa0e13bSRam Amrani while (wr) {
3784afa0e13bSRam Amrani rc = __qedr_post_send(ibqp, wr, bad_wr);
3785afa0e13bSRam Amrani if (rc)
3786afa0e13bSRam Amrani break;
3787afa0e13bSRam Amrani
3788afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3789afa0e13bSRam Amrani
3790afa0e13bSRam Amrani qedr_inc_sw_prod(&qp->sq);
3791afa0e13bSRam Amrani
3792afa0e13bSRam Amrani qp->sq.db_data.data.value++;
3793afa0e13bSRam Amrani
3794afa0e13bSRam Amrani wr = wr->next;
3795afa0e13bSRam Amrani }
3796afa0e13bSRam Amrani
3797afa0e13bSRam Amrani /* Trigger doorbell
3798afa0e13bSRam Amrani * If there was a failure in the first WR then it will be triggered in
3799afa0e13bSRam Amrani * vain. However, this is not harmful (as long as the producer value is
3800afa0e13bSRam Amrani * unchanged). For performance reasons we avoid checking for this
3801afa0e13bSRam Amrani * redundant doorbell.
380209c4854fSKalderon, Michal *
380309c4854fSKalderon, Michal * qp->wqe_wr_id is accessed during qedr_poll_cq, as
380409c4854fSKalderon, Michal * soon as we give the doorbell, we could get a completion
380509c4854fSKalderon, Michal * for this wr, therefore we need to make sure that the
380609c4854fSKalderon, Michal * memory is updated before giving the doorbell.
380709c4854fSKalderon, Michal * During qedr_poll_cq, rmb is called before accessing the
380809c4854fSKalderon, Michal * cqe. This covers for the smp_rmb as well.
3809afa0e13bSRam Amrani */
381009c4854fSKalderon, Michal smp_wmb();
3811afa0e13bSRam Amrani writel(qp->sq.db_data.raw, qp->sq.db);
3812afa0e13bSRam Amrani
3813afa0e13bSRam Amrani spin_unlock_irqrestore(&qp->q_lock, flags);
3814afa0e13bSRam Amrani
3815afa0e13bSRam Amrani return rc;
3816afa0e13bSRam Amrani }
3817afa0e13bSRam Amrani
38183491c9e7SYuval Bason static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
38193491c9e7SYuval Bason {
38203491c9e7SYuval Bason u32 used;
38213491c9e7SYuval Bason
38223491c9e7SYuval Bason /* Calculate the number of elements used from the producer
38233491c9e7SYuval Bason * and consumer counts and subtract it from the maximum
38243491c9e7SYuval Bason * number of work requests supported to get the elements left.
38253491c9e7SYuval Bason */
3826acca72e2SYuval Basson used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
38273491c9e7SYuval Bason
38283491c9e7SYuval Bason return hw_srq->max_wr - used;
38293491c9e7SYuval Bason }
38303491c9e7SYuval Bason
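/* Post a list of receive work requests to an SRQ. For each WR a header and
 * its SGEs are produced on the SRQ chain, then the SGE and WQE producers
 * are published to the FW through virt_prod_pair_addr, with dma_wmb()
 * barriers ordering the WQE writes and the two producer updates.
 */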
38313491c9e7SYuval Bason int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
38323491c9e7SYuval Bason const struct ib_recv_wr **bad_wr)
38333491c9e7SYuval Bason {
38343491c9e7SYuval Bason struct qedr_srq *srq = get_qedr_srq(ibsrq);
38353491c9e7SYuval Bason struct qedr_srq_hwq_info *hw_srq;
38363491c9e7SYuval Bason struct qedr_dev *dev = srq->dev;
38373491c9e7SYuval Bason struct qed_chain *pbl;
38383491c9e7SYuval Bason unsigned long flags;
38393491c9e7SYuval Bason int status = 0;
38403491c9e7SYuval Bason u32 num_sge;
38413491c9e7SYuval Bason
38423491c9e7SYuval Bason spin_lock_irqsave(&srq->lock, flags);
38433491c9e7SYuval Bason
38443491c9e7SYuval Bason hw_srq = &srq->hw_srq;
38453491c9e7SYuval Bason pbl = &srq->hw_srq.pbl;
38463491c9e7SYuval Bason while (wr) {
38473491c9e7SYuval Bason struct rdma_srq_wqe_header *hdr;
38483491c9e7SYuval Bason int i;
38493491c9e7SYuval Bason
38503491c9e7SYuval Bason if (!qedr_srq_elem_left(hw_srq) ||
38513491c9e7SYuval Bason wr->num_sge > srq->hw_srq.max_sges) {
38523491c9e7SYuval Bason DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3853acca72e2SYuval Basson hw_srq->wr_prod_cnt,
3854acca72e2SYuval Basson atomic_read(&hw_srq->wr_cons_cnt),
38553491c9e7SYuval Bason wr->num_sge, srq->hw_srq.max_sges);
38563491c9e7SYuval Bason status = -ENOMEM;
38573491c9e7SYuval Bason *bad_wr = wr;
38583491c9e7SYuval Bason break;
38593491c9e7SYuval Bason }
38603491c9e7SYuval Bason
38613491c9e7SYuval Bason hdr = qed_chain_produce(pbl);
38623491c9e7SYuval Bason num_sge = wr->num_sge;
38633491c9e7SYuval Bason /* Set number of sge and work request id in header */
38643491c9e7SYuval Bason SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
38653491c9e7SYuval Bason
38663491c9e7SYuval Bason srq->hw_srq.wr_prod_cnt++;
38673491c9e7SYuval Bason hw_srq->wqe_prod++;
38683491c9e7SYuval Bason hw_srq->sge_prod++;
38693491c9e7SYuval Bason
38703491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ,
38713491c9e7SYuval Bason "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
38723491c9e7SYuval Bason wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
38733491c9e7SYuval Bason
38743491c9e7SYuval Bason for (i = 0; i < wr->num_sge; i++) {
38753491c9e7SYuval Bason struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
38763491c9e7SYuval Bason
38773491c9e7SYuval Bason /* Set SGE length, lkey and address */
38783491c9e7SYuval Bason SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
38793491c9e7SYuval Bason wr->sg_list[i].length, wr->sg_list[i].lkey);
38803491c9e7SYuval Bason
38813491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ,
38823491c9e7SYuval Bason "[%d]: len %d key %x addr %x:%x\n",
38833491c9e7SYuval Bason i, srq_sge->length, srq_sge->l_key,
38843491c9e7SYuval Bason srq_sge->addr.hi, srq_sge->addr.lo);
38853491c9e7SYuval Bason hw_srq->sge_prod++;
38863491c9e7SYuval Bason }
38873491c9e7SYuval Bason
3888acca72e2SYuval Basson /* Update WQE and SGE information before
38893491c9e7SYuval Bason * updating producer.
38903491c9e7SYuval Bason */
3891acca72e2SYuval Basson dma_wmb();
38923491c9e7SYuval Bason
38933491c9e7SYuval Bason /* The SRQ producer is 8 bytes: the SGE producer index is
38943491c9e7SYuval Bason * updated in the first 4 bytes and the WQE producer in the
38953491c9e7SYuval Bason * next 4 bytes.
38963491c9e7SYuval Bason */
3897f45271acSAlok Prasad srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3898acca72e2SYuval Basson /* Make sure sge producer is updated first */
3899acca72e2SYuval Basson dma_wmb();
3900f45271acSAlok Prasad srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
39013491c9e7SYuval Bason
39023491c9e7SYuval Bason wr = wr->next;
39033491c9e7SYuval Bason }
39043491c9e7SYuval Bason
39053491c9e7SYuval Bason DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
39063491c9e7SYuval Bason qed_chain_get_elem_left(pbl));
39073491c9e7SYuval Bason spin_unlock_irqrestore(&srq->lock, flags);
39083491c9e7SYuval Bason
39093491c9e7SYuval Bason return status;
39103491c9e7SYuval Bason }
39113491c9e7SYuval Bason
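/* Post a list of receive work requests to the RQ. GSI QPs are handled by
 * qedr_gsi_post_recv(); for iWARP a second doorbell (iwarp_db2) is rung
 * for every WR.
 */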
3912d34ac5cdSBart Van Assche int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3913d34ac5cdSBart Van Assche const struct ib_recv_wr **bad_wr)
3914afa0e13bSRam Amrani {
3915afa0e13bSRam Amrani struct qedr_qp *qp = get_qedr_qp(ibqp);
3916afa0e13bSRam Amrani struct qedr_dev *dev = qp->dev;
3917afa0e13bSRam Amrani unsigned long flags;
3918afa0e13bSRam Amrani int status = 0;
3919afa0e13bSRam Amrani
392004886779SRam Amrani if (qp->qp_type == IB_QPT_GSI)
392104886779SRam Amrani return qedr_gsi_post_recv(ibqp, wr, bad_wr);
392204886779SRam Amrani
3923afa0e13bSRam Amrani spin_lock_irqsave(&qp->q_lock, flags);
3924afa0e13bSRam Amrani
3925afa0e13bSRam Amrani while (wr) {
3926afa0e13bSRam Amrani int i;
3927afa0e13bSRam Amrani
3928afa0e13bSRam Amrani if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3929afa0e13bSRam Amrani QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3930afa0e13bSRam Amrani wr->num_sge > qp->rq.max_sges) {
3931afa0e13bSRam Amrani DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3932afa0e13bSRam Amrani qed_chain_get_elem_left_u32(&qp->rq.pbl),
3933afa0e13bSRam Amrani QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3934afa0e13bSRam Amrani qp->rq.max_sges);
3935afa0e13bSRam Amrani status = -ENOMEM;
3936afa0e13bSRam Amrani *bad_wr = wr;
3937afa0e13bSRam Amrani break;
3938afa0e13bSRam Amrani }
3939afa0e13bSRam Amrani for (i = 0; i < wr->num_sge; i++) {
3940afa0e13bSRam Amrani u32 flags = 0;
3941afa0e13bSRam Amrani struct rdma_rq_sge *rqe =
3942afa0e13bSRam Amrani qed_chain_produce(&qp->rq.pbl);
3943afa0e13bSRam Amrani
3944afa0e13bSRam Amrani /* The first SGE must include the number
3945afa0e13bSRam Amrani * of SGEs in the list
3946afa0e13bSRam Amrani */
3947afa0e13bSRam Amrani if (!i)
3948afa0e13bSRam Amrani SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3949afa0e13bSRam Amrani wr->num_sge);
3950afa0e13bSRam Amrani
3951d52c89f1SMichal Kalderon SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3952afa0e13bSRam Amrani wr->sg_list[i].lkey);
3953afa0e13bSRam Amrani
3954afa0e13bSRam Amrani RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3955afa0e13bSRam Amrani wr->sg_list[i].length, flags);
3956afa0e13bSRam Amrani }
3957afa0e13bSRam Amrani
3958afa0e13bSRam Amrani /* Special case of no SGEs. The FW requires between 1-4 SGEs,
3959afa0e13bSRam Amrani * so in this case we need to post one SGE with length zero. This is
3960afa0e13bSRam Amrani * because an RDMA write with immediate consumes an RQ entry.
3961afa0e13bSRam Amrani */
3962afa0e13bSRam Amrani if (!wr->num_sge) {
3963afa0e13bSRam Amrani u32 flags = 0;
3964afa0e13bSRam Amrani struct rdma_rq_sge *rqe =
3965afa0e13bSRam Amrani qed_chain_produce(&qp->rq.pbl);
3966afa0e13bSRam Amrani
3967afa0e13bSRam Amrani /* The first SGE must include the number
3968afa0e13bSRam Amrani * of SGEs in the list
3969afa0e13bSRam Amrani */
3970d52c89f1SMichal Kalderon SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3971afa0e13bSRam Amrani SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3972afa0e13bSRam Amrani
3973afa0e13bSRam Amrani RQ_SGE_SET(rqe, 0, 0, flags);
3974afa0e13bSRam Amrani i = 1;
3975afa0e13bSRam Amrani }
3976afa0e13bSRam Amrani
3977afa0e13bSRam Amrani qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3978afa0e13bSRam Amrani qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3979afa0e13bSRam Amrani
3980afa0e13bSRam Amrani qedr_inc_sw_prod(&qp->rq);
3981afa0e13bSRam Amrani
398209c4854fSKalderon, Michal /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
398309c4854fSKalderon, Michal * soon as we give the doorbell, we could get a completion
398409c4854fSKalderon, Michal * for this wr, therefore we need to make sure that the
398509c4854fSKalderon, Michal * memory is updated before giving the doorbell.
398609c4854fSKalderon, Michal * During qedr_poll_cq, rmb is called before accessing the
398709c4854fSKalderon, Michal * cqe. This covers for the smp_rmb as well.
398809c4854fSKalderon, Michal */
398909c4854fSKalderon, Michal smp_wmb();
3990afa0e13bSRam Amrani
3991afa0e13bSRam Amrani qp->rq.db_data.data.value++;
3992afa0e13bSRam Amrani
3993afa0e13bSRam Amrani writel(qp->rq.db_data.raw, qp->rq.db);
3994afa0e13bSRam Amrani
3995f5b1b177SKalderon, Michal if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3996f5b1b177SKalderon, Michal writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3997f5b1b177SKalderon, Michal }
3998f5b1b177SKalderon, Michal
3999afa0e13bSRam Amrani wr = wr->next;
4000afa0e13bSRam Amrani }
4001afa0e13bSRam Amrani
4002afa0e13bSRam Amrani spin_unlock_irqrestore(&qp->q_lock, flags);
4003afa0e13bSRam Amrani
4004afa0e13bSRam Amrani return status;
4005afa0e13bSRam Amrani }
4006afa0e13bSRam Amrani
4007afa0e13bSRam Amrani static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4008afa0e13bSRam Amrani {
4009afa0e13bSRam Amrani struct rdma_cqe_requester *resp_cqe = &cqe->req;
4010afa0e13bSRam Amrani
4011afa0e13bSRam Amrani return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4012afa0e13bSRam Amrani cq->pbl_toggle;
4013afa0e13bSRam Amrani }
4014afa0e13bSRam Amrani
4015afa0e13bSRam Amrani static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4016afa0e13bSRam Amrani {
4017afa0e13bSRam Amrani struct rdma_cqe_requester *resp_cqe = &cqe->req;
4018afa0e13bSRam Amrani struct qedr_qp *qp;
4019afa0e13bSRam Amrani
4020afa0e13bSRam Amrani qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4021afa0e13bSRam Amrani resp_cqe->qp_handle.lo,
4022afa0e13bSRam Amrani u64);
4023afa0e13bSRam Amrani return qp;
4024afa0e13bSRam Amrani }
4025afa0e13bSRam Amrani
4026afa0e13bSRam Amrani static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4027afa0e13bSRam Amrani {
4028afa0e13bSRam Amrani struct rdma_cqe_requester *resp_cqe = &cqe->req;
4029afa0e13bSRam Amrani
4030afa0e13bSRam Amrani return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4031afa0e13bSRam Amrani }
4032afa0e13bSRam Amrani
4033afa0e13bSRam Amrani /* Return latest CQE (needs processing) */
4034afa0e13bSRam Amrani static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4035afa0e13bSRam Amrani {
4036afa0e13bSRam Amrani return cq->latest_cqe;
4037afa0e13bSRam Amrani }
4038afa0e13bSRam Amrani
4039afa0e13bSRam Amrani /* For FMR we need to increase the completed counter used by the FMR
4040afa0e13bSRam Amrani * algorithm to determine whether a PBL can be freed or not.
4041afa0e13bSRam Amrani * We need to do this whether the work request was signaled or not. For
4042afa0e13bSRam Amrani * this purpose we call this function from the condition that checks if a WR
4043afa0e13bSRam Amrani * should be skipped, to make sure we don't miss it (possibly this FMR
4044afa0e13bSRam Amrani * operation was not signaled).
4045afa0e13bSRam Amrani */
4046afa0e13bSRam Amrani static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4047afa0e13bSRam Amrani {
4048afa0e13bSRam Amrani if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4049afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4050afa0e13bSRam Amrani }
4051afa0e13bSRam Amrani
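/* Complete up to num_entries SQ WQEs up to hw_cons, filling work
 * completions for the signaled ones (or for all of them when force is
 * set). Returns the number of WCs written.
 */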
4052afa0e13bSRam Amrani static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4053afa0e13bSRam Amrani struct qedr_cq *cq, int num_entries,
4054afa0e13bSRam Amrani struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4055afa0e13bSRam Amrani int force)
4056afa0e13bSRam Amrani {
4057afa0e13bSRam Amrani u16 cnt = 0;
4058afa0e13bSRam Amrani
4059afa0e13bSRam Amrani while (num_entries && qp->sq.wqe_cons != hw_cons) {
4060afa0e13bSRam Amrani if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4061afa0e13bSRam Amrani qedr_chk_if_fmr(qp);
4062afa0e13bSRam Amrani /* skip WC */
4063afa0e13bSRam Amrani goto next_cqe;
4064afa0e13bSRam Amrani }
4065afa0e13bSRam Amrani
4066afa0e13bSRam Amrani /* fill WC */
4067afa0e13bSRam Amrani wc->status = status;
406827035a1bSAmrani, Ram wc->vendor_err = 0;
4069afa0e13bSRam Amrani wc->wc_flags = 0;
4070afa0e13bSRam Amrani wc->src_qp = qp->id;
4071afa0e13bSRam Amrani wc->qp = &qp->ibqp;
4072afa0e13bSRam Amrani
4073afa0e13bSRam Amrani wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4074afa0e13bSRam Amrani wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4075afa0e13bSRam Amrani
4076afa0e13bSRam Amrani switch (wc->opcode) {
4077afa0e13bSRam Amrani case IB_WC_RDMA_WRITE:
4078afa0e13bSRam Amrani wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4079afa0e13bSRam Amrani break;
4080afa0e13bSRam Amrani case IB_WC_COMP_SWAP:
4081afa0e13bSRam Amrani case IB_WC_FETCH_ADD:
4082afa0e13bSRam Amrani wc->byte_len = 8;
4083afa0e13bSRam Amrani break;
4084afa0e13bSRam Amrani case IB_WC_REG_MR:
4085afa0e13bSRam Amrani qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4086afa0e13bSRam Amrani break;
4087dac27386SMichal Kalderon case IB_WC_RDMA_READ:
4088dac27386SMichal Kalderon case IB_WC_SEND:
4089dac27386SMichal Kalderon wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4090dac27386SMichal Kalderon break;
4091afa0e13bSRam Amrani default:
4092afa0e13bSRam Amrani break;
4093afa0e13bSRam Amrani }
4094afa0e13bSRam Amrani
4095afa0e13bSRam Amrani num_entries--;
4096afa0e13bSRam Amrani wc++;
4097afa0e13bSRam Amrani cnt++;
4098afa0e13bSRam Amrani next_cqe:
4099afa0e13bSRam Amrani while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4100afa0e13bSRam Amrani qed_chain_consume(&qp->sq.pbl);
4101afa0e13bSRam Amrani qedr_inc_sw_cons(&qp->sq);
4102afa0e13bSRam Amrani }
4103afa0e13bSRam Amrani
4104afa0e13bSRam Amrani return cnt;
4105afa0e13bSRam Amrani }
4106afa0e13bSRam Amrani
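/* Handle a requester CQE. On success or flush, WQEs up to sq_cons are
 * completed with the corresponding status; for any other error the
 * preceding WQEs are completed as successful, the QP is moved to the error
 * state and one extra WC carries the translated error status.
 */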
4107afa0e13bSRam Amrani static int qedr_poll_cq_req(struct qedr_dev *dev,
4108afa0e13bSRam Amrani struct qedr_qp *qp, struct qedr_cq *cq,
4109afa0e13bSRam Amrani int num_entries, struct ib_wc *wc,
4110afa0e13bSRam Amrani struct rdma_cqe_requester *req)
4111afa0e13bSRam Amrani {
4112afa0e13bSRam Amrani int cnt = 0;
4113afa0e13bSRam Amrani
4114afa0e13bSRam Amrani switch (req->status) {
4115afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_OK:
4116afa0e13bSRam Amrani cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4117afa0e13bSRam Amrani IB_WC_SUCCESS, 0);
4118afa0e13bSRam Amrani break;
4119afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4120c78c3149SRam Amrani if (qp->state != QED_ROCE_QP_STATE_ERR)
4121dc728f77SKalderon, Michal DP_DEBUG(dev, QEDR_MSG_CQ,
4122afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4123afa0e13bSRam Amrani cq->icid, qp->icid);
4124afa0e13bSRam Amrani cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
412574c3875cSAmrani, Ram IB_WC_WR_FLUSH_ERR, 1);
4126afa0e13bSRam Amrani break;
4127afa0e13bSRam Amrani default:
4128afa0e13bSRam Amrani /* process all WQEs before the consumer */
4129afa0e13bSRam Amrani qp->state = QED_ROCE_QP_STATE_ERR;
4130afa0e13bSRam Amrani cnt = process_req(dev, qp, cq, num_entries, wc,
4131afa0e13bSRam Amrani req->sq_cons - 1, IB_WC_SUCCESS, 0);
4132afa0e13bSRam Amrani wc += cnt;
4133afa0e13bSRam Amrani /* if we have extra WC fill it with actual error info */
4134afa0e13bSRam Amrani if (cnt < num_entries) {
4135afa0e13bSRam Amrani enum ib_wc_status wc_status;
4136afa0e13bSRam Amrani
4137afa0e13bSRam Amrani switch (req->status) {
4138afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4139afa0e13bSRam Amrani DP_ERR(dev,
4140afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4141afa0e13bSRam Amrani cq->icid, qp->icid);
4142afa0e13bSRam Amrani wc_status = IB_WC_BAD_RESP_ERR;
4143afa0e13bSRam Amrani break;
4144afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4145afa0e13bSRam Amrani DP_ERR(dev,
4146afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4147afa0e13bSRam Amrani cq->icid, qp->icid);
4148afa0e13bSRam Amrani wc_status = IB_WC_LOC_LEN_ERR;
4149afa0e13bSRam Amrani break;
4150afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4151afa0e13bSRam Amrani DP_ERR(dev,
4152afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4153afa0e13bSRam Amrani cq->icid, qp->icid);
4154afa0e13bSRam Amrani wc_status = IB_WC_LOC_QP_OP_ERR;
4155afa0e13bSRam Amrani break;
4156afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4157afa0e13bSRam Amrani DP_ERR(dev,
4158afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4159afa0e13bSRam Amrani cq->icid, qp->icid);
4160afa0e13bSRam Amrani wc_status = IB_WC_LOC_PROT_ERR;
4161afa0e13bSRam Amrani break;
4162afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4163afa0e13bSRam Amrani DP_ERR(dev,
4164afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4165afa0e13bSRam Amrani cq->icid, qp->icid);
4166afa0e13bSRam Amrani wc_status = IB_WC_MW_BIND_ERR;
4167afa0e13bSRam Amrani break;
4168afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4169afa0e13bSRam Amrani DP_ERR(dev,
4170afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4171afa0e13bSRam Amrani cq->icid, qp->icid);
4172afa0e13bSRam Amrani wc_status = IB_WC_REM_INV_REQ_ERR;
4173afa0e13bSRam Amrani break;
4174afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4175afa0e13bSRam Amrani DP_ERR(dev,
4176afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4177afa0e13bSRam Amrani cq->icid, qp->icid);
4178afa0e13bSRam Amrani wc_status = IB_WC_REM_ACCESS_ERR;
4179afa0e13bSRam Amrani break;
4180afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4181afa0e13bSRam Amrani DP_ERR(dev,
4182afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4183afa0e13bSRam Amrani cq->icid, qp->icid);
4184afa0e13bSRam Amrani wc_status = IB_WC_REM_OP_ERR;
4185afa0e13bSRam Amrani break;
4186afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4187afa0e13bSRam Amrani DP_ERR(dev,
4188afa0e13bSRam Amrani "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4189afa0e13bSRam Amrani cq->icid, qp->icid);
4190afa0e13bSRam Amrani wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4191afa0e13bSRam Amrani break;
4192afa0e13bSRam Amrani case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4193afa0e13bSRam Amrani DP_ERR(dev,
4194afa0e13bSRam Amrani "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4195afa0e13bSRam Amrani cq->icid, qp->icid);
4196afa0e13bSRam Amrani wc_status = IB_WC_RETRY_EXC_ERR;
4197afa0e13bSRam Amrani break;
4198afa0e13bSRam Amrani default:
4199afa0e13bSRam Amrani DP_ERR(dev,
4200afa0e13bSRam Amrani "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4201afa0e13bSRam Amrani cq->icid, qp->icid);
4202afa0e13bSRam Amrani wc_status = IB_WC_GENERAL_ERR;
4203afa0e13bSRam Amrani }
4204afa0e13bSRam Amrani cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4205afa0e13bSRam Amrani wc_status, 1);
4206afa0e13bSRam Amrani }
4207afa0e13bSRam Amrani }
4208afa0e13bSRam Amrani
4209afa0e13bSRam Amrani return cnt;
4210afa0e13bSRam Amrani }
4211afa0e13bSRam Amrani
4212b6acd71fSAmrani, Ram static inline int qedr_cqe_resp_status_to_ib(u8 status)
4213b6acd71fSAmrani, Ram {
4214b6acd71fSAmrani, Ram switch (status) {
4215b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4216b6acd71fSAmrani, Ram return IB_WC_LOC_ACCESS_ERR;
4217b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4218b6acd71fSAmrani, Ram return IB_WC_LOC_LEN_ERR;
4219b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4220b6acd71fSAmrani, Ram return IB_WC_LOC_QP_OP_ERR;
4221b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4222b6acd71fSAmrani, Ram return IB_WC_LOC_PROT_ERR;
4223b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4224b6acd71fSAmrani, Ram return IB_WC_MW_BIND_ERR;
4225b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4226b6acd71fSAmrani, Ram return IB_WC_REM_INV_RD_REQ_ERR;
4227b6acd71fSAmrani, Ram case RDMA_CQE_RESP_STS_OK:
4228b6acd71fSAmrani, Ram return IB_WC_SUCCESS;
4229b6acd71fSAmrani, Ram default:
4230b6acd71fSAmrani, Ram return IB_WC_GENERAL_ERR;
4231b6acd71fSAmrani, Ram }
4232b6acd71fSAmrani, Ram }
4233b6acd71fSAmrani, Ram
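/* Fill a work completion for a successful responder CQE. RDMA is only
 * valid together with IMM, and IMM and INV are mutually exclusive; -EINVAL
 * is returned for any other flag combination.
 */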
4234b6acd71fSAmrani, Ram static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4235b6acd71fSAmrani, Ram struct ib_wc *wc)
4236b6acd71fSAmrani, Ram {
4237b6acd71fSAmrani, Ram wc->status = IB_WC_SUCCESS;
4238b6acd71fSAmrani, Ram wc->byte_len = le32_to_cpu(resp->length);
4239b6acd71fSAmrani, Ram
4240b6acd71fSAmrani, Ram if (resp->flags & QEDR_RESP_IMM) {
42417bed7ebcSJason Gunthorpe wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4242b6acd71fSAmrani, Ram wc->wc_flags |= IB_WC_WITH_IMM;
4243b6acd71fSAmrani, Ram
4244b6acd71fSAmrani, Ram if (resp->flags & QEDR_RESP_RDMA)
4245b6acd71fSAmrani, Ram wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4246b6acd71fSAmrani, Ram
4247b6acd71fSAmrani, Ram if (resp->flags & QEDR_RESP_INV)
4248b6acd71fSAmrani, Ram return -EINVAL;
4249b6acd71fSAmrani, Ram
4250b6acd71fSAmrani, Ram } else if (resp->flags & QEDR_RESP_INV) {
4251b6acd71fSAmrani, Ram wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4252b6acd71fSAmrani, Ram wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4253b6acd71fSAmrani, Ram
4254b6acd71fSAmrani, Ram if (resp->flags & QEDR_RESP_RDMA)
4255b6acd71fSAmrani, Ram return -EINVAL;
4256b6acd71fSAmrani, Ram
4257b6acd71fSAmrani, Ram } else if (resp->flags & QEDR_RESP_RDMA) {
4258b6acd71fSAmrani, Ram return -EINVAL;
4259b6acd71fSAmrani, Ram }
4260b6acd71fSAmrani, Ram
4261b6acd71fSAmrani, Ram return 0;
4262b6acd71fSAmrani, Ram }
4263b6acd71fSAmrani, Ram
4264afa0e13bSRam Amrani static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4265afa0e13bSRam Amrani struct qedr_cq *cq, struct ib_wc *wc,
4266afa0e13bSRam Amrani struct rdma_cqe_responder *resp, u64 wr_id)
4267afa0e13bSRam Amrani {
4268b6acd71fSAmrani, Ram /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4269afa0e13bSRam Amrani wc->opcode = IB_WC_RECV;
4270afa0e13bSRam Amrani wc->wc_flags = 0;
4271afa0e13bSRam Amrani
4272b6acd71fSAmrani, Ram if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4273b6acd71fSAmrani, Ram if (qedr_set_ok_cqe_resp_wc(resp, wc))
4274b6acd71fSAmrani, Ram DP_ERR(dev,
4275b6acd71fSAmrani, Ram "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4276b6acd71fSAmrani, Ram cq, cq->icid, resp->flags);
4277afa0e13bSRam Amrani
4278b6acd71fSAmrani, Ram } else {
4279b6acd71fSAmrani, Ram wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4280b6acd71fSAmrani, Ram if (wc->status == IB_WC_GENERAL_ERR)
4281b6acd71fSAmrani, Ram DP_ERR(dev,
4282b6acd71fSAmrani, Ram "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4283b6acd71fSAmrani, Ram cq, cq->icid, resp->status);
4284afa0e13bSRam Amrani }
4285afa0e13bSRam Amrani
4286b6acd71fSAmrani, Ram /* Fill the rest of the WC */
428727035a1bSAmrani, Ram wc->vendor_err = 0;
4288afa0e13bSRam Amrani wc->src_qp = qp->id;
4289afa0e13bSRam Amrani wc->qp = &qp->ibqp;
4290afa0e13bSRam Amrani wc->wr_id = wr_id;
4291afa0e13bSRam Amrani }
4292afa0e13bSRam Amrani
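/* Build a work completion for an SRQ responder CQE. The wr_id comes from
 * the CQE itself (srq_wr_id) rather than from a per-QP ring, flushed CQEs
 * are reported as IB_WC_WR_FLUSH_ERR and the SRQ consumer count is
 * advanced.
 */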
42933491c9e7SYuval Bason static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
42943491c9e7SYuval Bason struct qedr_cq *cq, struct ib_wc *wc,
42953491c9e7SYuval Bason struct rdma_cqe_responder *resp)
42963491c9e7SYuval Bason {
42973491c9e7SYuval Bason struct qedr_srq *srq = qp->srq;
42983491c9e7SYuval Bason u64 wr_id;
42993491c9e7SYuval Bason
43003491c9e7SYuval Bason wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
43013491c9e7SYuval Bason le32_to_cpu(resp->srq_wr_id.lo), u64);
43023491c9e7SYuval Bason
43033491c9e7SYuval Bason if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
43043491c9e7SYuval Bason wc->status = IB_WC_WR_FLUSH_ERR;
43053491c9e7SYuval Bason wc->vendor_err = 0;
43063491c9e7SYuval Bason wc->wr_id = wr_id;
43073491c9e7SYuval Bason wc->byte_len = 0;
43083491c9e7SYuval Bason wc->src_qp = qp->id;
43093491c9e7SYuval Bason wc->qp = &qp->ibqp;
43103491c9e7SYuval Bason wc->wr_id = wr_id;
43113491c9e7SYuval Bason } else {
43123491c9e7SYuval Bason __process_resp_one(dev, qp, cq, wc, resp, wr_id);
43133491c9e7SYuval Bason }
4314acca72e2SYuval Basson atomic_inc(&srq->hw_srq.wr_cons_cnt);
43153491c9e7SYuval Bason
43163491c9e7SYuval Bason return 1;
43173491c9e7SYuval Bason }
4318afa0e13bSRam Amrani static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4319afa0e13bSRam Amrani struct qedr_cq *cq, struct ib_wc *wc,
4320afa0e13bSRam Amrani struct rdma_cqe_responder *resp)
4321afa0e13bSRam Amrani {
4322afa0e13bSRam Amrani u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4323afa0e13bSRam Amrani
4324afa0e13bSRam Amrani __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4325afa0e13bSRam Amrani
4326afa0e13bSRam Amrani while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4327afa0e13bSRam Amrani qed_chain_consume(&qp->rq.pbl);
4328afa0e13bSRam Amrani qedr_inc_sw_cons(&qp->rq);
4329afa0e13bSRam Amrani
4330afa0e13bSRam Amrani return 1;
4331afa0e13bSRam Amrani }
4332afa0e13bSRam Amrani
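/* Generate flush completions for outstanding RQ WQEs up to the consumer
 * index reported by hardware, consuming the matching PBL elements. Returns
 * the number of work completions written, bounded by num_entries.
 */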
4333afa0e13bSRam Amrani static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4334afa0e13bSRam Amrani int num_entries, struct ib_wc *wc, u16 hw_cons)
4335afa0e13bSRam Amrani {
4336afa0e13bSRam Amrani u16 cnt = 0;
4337afa0e13bSRam Amrani
4338afa0e13bSRam Amrani while (num_entries && qp->rq.wqe_cons != hw_cons) {
4339afa0e13bSRam Amrani /* fill WC */
4340afa0e13bSRam Amrani wc->status = IB_WC_WR_FLUSH_ERR;
434127035a1bSAmrani, Ram wc->vendor_err = 0;
4342afa0e13bSRam Amrani wc->wc_flags = 0;
4343afa0e13bSRam Amrani wc->src_qp = qp->id;
4344afa0e13bSRam Amrani wc->byte_len = 0;
4345afa0e13bSRam Amrani wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4346afa0e13bSRam Amrani wc->qp = &qp->ibqp;
4347afa0e13bSRam Amrani num_entries--;
4348afa0e13bSRam Amrani wc++;
4349afa0e13bSRam Amrani cnt++;
4350afa0e13bSRam Amrani while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4351afa0e13bSRam Amrani qed_chain_consume(&qp->rq.pbl);
4352afa0e13bSRam Amrani qedr_inc_sw_cons(&qp->rq);
4353afa0e13bSRam Amrani }
4354afa0e13bSRam Amrani
4355afa0e13bSRam Amrani return cnt;
4356afa0e13bSRam Amrani }
4357afa0e13bSRam Amrani
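/* Consume the responder CQE only once the software RQ consumer has caught
 * up with the consumer index reported in the CQE, and note that the CQ
 * doorbell needs to be updated.
 */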
4358afa0e13bSRam Amrani static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4359afa0e13bSRam Amrani struct rdma_cqe_responder *resp, int *update)
4360afa0e13bSRam Amrani {
436150bc60cbSMichal Kalderon if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4362afa0e13bSRam Amrani consume_cqe(cq);
4363afa0e13bSRam Amrani *update |= 1;
4364afa0e13bSRam Amrani }
4365afa0e13bSRam Amrani }
4366afa0e13bSRam Amrani
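/* Poll a single responder CQE that arrived via an SRQ; the CQE is always
 * consumed immediately.
 */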
43673491c9e7SYuval Bason static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
43683491c9e7SYuval Bason struct qedr_cq *cq, int num_entries,
43693491c9e7SYuval Bason struct ib_wc *wc,
43703491c9e7SYuval Bason struct rdma_cqe_responder *resp)
43713491c9e7SYuval Bason {
43723491c9e7SYuval Bason int cnt;
43733491c9e7SYuval Bason
43743491c9e7SYuval Bason cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
43753491c9e7SYuval Bason consume_cqe(cq);
43763491c9e7SYuval Bason
43773491c9e7SYuval Bason return cnt;
43783491c9e7SYuval Bason }
43793491c9e7SYuval Bason
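/* Poll responder CQEs for a regular RQ. A flush-error CQE may complete
 * several pending WQEs at once; any other CQE completes exactly one and is
 * consumed immediately.
 */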
4380afa0e13bSRam Amrani static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4381afa0e13bSRam Amrani struct qedr_cq *cq, int num_entries,
4382afa0e13bSRam Amrani struct ib_wc *wc, struct rdma_cqe_responder *resp,
4383afa0e13bSRam Amrani int *update)
4384afa0e13bSRam Amrani {
4385afa0e13bSRam Amrani int cnt;
4386afa0e13bSRam Amrani
4387afa0e13bSRam Amrani if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4388afa0e13bSRam Amrani cnt = process_resp_flush(qp, cq, num_entries, wc,
438950bc60cbSMichal Kalderon resp->rq_cons_or_srq_id);
4390afa0e13bSRam Amrani try_consume_resp_cqe(cq, qp, resp, update);
4391afa0e13bSRam Amrani } else {
4392afa0e13bSRam Amrani cnt = process_resp_one(dev, qp, cq, wc, resp);
4393afa0e13bSRam Amrani consume_cqe(cq);
4394afa0e13bSRam Amrani *update |= 1;
4395afa0e13bSRam Amrani }
4396afa0e13bSRam Amrani
4397afa0e13bSRam Amrani return cnt;
4398afa0e13bSRam Amrani }
4399afa0e13bSRam Amrani
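/* Consume the requester CQE once the SQ consumer index matches the value
 * reported in the CQE, and note that the CQ doorbell needs updating.
 */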
4400afa0e13bSRam Amrani static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4401afa0e13bSRam Amrani struct rdma_cqe_requester *req, int *update)
4402afa0e13bSRam Amrani {
4403afa0e13bSRam Amrani if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4404afa0e13bSRam Amrani consume_cqe(cq);
4405afa0e13bSRam Amrani *update |= 1;
4406afa0e13bSRam Amrani }
4407afa0e13bSRam Amrani }
4408afa0e13bSRam Amrani
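/* Main CQ poll entry point. Polling a destroyed CQ is rejected with a
 * warning and GSI CQs are delegated to qedr_gsi_poll_cq(). For regular CQs,
 * valid CQEs are dispatched by type (requester, RQ responder, SRQ responder)
 * under cq_lock, and the consumer doorbell is rung once for the whole batch
 * when anything was consumed.
 */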
4409afa0e13bSRam Amrani int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4410afa0e13bSRam Amrani {
4411afa0e13bSRam Amrani struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4412afa0e13bSRam Amrani struct qedr_cq *cq = get_qedr_cq(ibcq);
4413e3fd112cSKalderon, Michal union rdma_cqe *cqe;
4414afa0e13bSRam Amrani u32 old_cons, new_cons;
4415afa0e13bSRam Amrani unsigned long flags;
4416afa0e13bSRam Amrani int update = 0;
4417afa0e13bSRam Amrani int done = 0;
4418afa0e13bSRam Amrani
44194dd72636SAmrani, Ram if (cq->destroyed) {
44204dd72636SAmrani, Ram DP_ERR(dev,
44214dd72636SAmrani, Ram "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
44224dd72636SAmrani, Ram cq, cq->icid);
44234dd72636SAmrani, Ram return 0;
44244dd72636SAmrani, Ram }
44254dd72636SAmrani, Ram
442604886779SRam Amrani if (cq->cq_type == QEDR_CQ_TYPE_GSI)
442704886779SRam Amrani return qedr_gsi_poll_cq(ibcq, num_entries, wc);
442804886779SRam Amrani
4429afa0e13bSRam Amrani spin_lock_irqsave(&cq->cq_lock, flags);
4430e3fd112cSKalderon, Michal cqe = cq->latest_cqe;
4431afa0e13bSRam Amrani old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4432afa0e13bSRam Amrani while (num_entries && is_valid_cqe(cq, cqe)) {
4433afa0e13bSRam Amrani struct qedr_qp *qp;
4434afa0e13bSRam Amrani int cnt = 0;
4435afa0e13bSRam Amrani
4436afa0e13bSRam Amrani /* prevent speculative reads of any field of CQE */
4437afa0e13bSRam Amrani rmb();
4438afa0e13bSRam Amrani
4439afa0e13bSRam Amrani qp = cqe_get_qp(cqe);
4440afa0e13bSRam Amrani if (!qp) {
4441afa0e13bSRam Amrani WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4442afa0e13bSRam Amrani break;
4443afa0e13bSRam Amrani }
4444afa0e13bSRam Amrani
4445afa0e13bSRam Amrani wc->qp = &qp->ibqp;
4446afa0e13bSRam Amrani
4447afa0e13bSRam Amrani switch (cqe_get_type(cqe)) {
4448afa0e13bSRam Amrani case RDMA_CQE_TYPE_REQUESTER:
4449afa0e13bSRam Amrani cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4450afa0e13bSRam Amrani &cqe->req);
4451afa0e13bSRam Amrani try_consume_req_cqe(cq, qp, &cqe->req, &update);
4452afa0e13bSRam Amrani break;
4453afa0e13bSRam Amrani case RDMA_CQE_TYPE_RESPONDER_RQ:
4454afa0e13bSRam Amrani cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4455afa0e13bSRam Amrani &cqe->resp, &update);
4456afa0e13bSRam Amrani break;
44573491c9e7SYuval Bason case RDMA_CQE_TYPE_RESPONDER_SRQ:
44583491c9e7SYuval Bason cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
44593491c9e7SYuval Bason wc, &cqe->resp);
44603491c9e7SYuval Bason update = 1;
44613491c9e7SYuval Bason break;
4462afa0e13bSRam Amrani case RDMA_CQE_TYPE_INVALID:
4463afa0e13bSRam Amrani default:
4464afa0e13bSRam Amrani DP_ERR(dev, "Error: invalid CQE type = %d\n",
4465afa0e13bSRam Amrani cqe_get_type(cqe));
4466afa0e13bSRam Amrani }
4467afa0e13bSRam Amrani num_entries -= cnt;
4468afa0e13bSRam Amrani wc += cnt;
4469afa0e13bSRam Amrani done += cnt;
4470afa0e13bSRam Amrani
4471afa0e13bSRam Amrani cqe = get_cqe(cq);
4472afa0e13bSRam Amrani }
4473afa0e13bSRam Amrani new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4474afa0e13bSRam Amrani
4475afa0e13bSRam Amrani cq->cq_cons += new_cons - old_cons;
4476afa0e13bSRam Amrani
4477afa0e13bSRam Amrani if (update)
4478afa0e13bSRam Amrani /* doorbell notifies about the latest VALID entry,
4479afa0e13bSRam Amrani * but the chain already points to the next INVALID one
4480afa0e13bSRam Amrani */
4481afa0e13bSRam Amrani doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4482afa0e13bSRam Amrani
4483afa0e13bSRam Amrani spin_unlock_irqrestore(&cq->cq_lock, flags);
4484afa0e13bSRam Amrani return done;
4485afa0e13bSRam Amrani }
4486993d1b52SRam Amrani
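/* MAD processing stub: qedr does not inspect incoming MADs, so every MAD is
 * reported back as successfully processed.
 */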
4487993d1b52SRam Amrani int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
44881fb7f897SMark Bloch u32 port_num, const struct ib_wc *in_wc,
4489e26e7b88SLeon Romanovsky const struct ib_grh *in_grh, const struct ib_mad *in,
4490e26e7b88SLeon Romanovsky struct ib_mad *out_mad, size_t *out_mad_size,
4491e26e7b88SLeon Romanovsky u16 *out_mad_pkey_index)
4492993d1b52SRam Amrani {
4493993d1b52SRam Amrani return IB_MAD_RESULT_SUCCESS;
4494993d1b52SRam Amrani }