/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                pr_warn("Unexpected event type %d "
                        "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device = ibcq->device;
                event.event = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}

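/*
 * A CQE belongs to software when its ownership bit matches the parity
 * of the consumer index's current lap through the ring; otherwise it is
 * still owned by hardware and NULL is returned.  With 64-byte CQEs the
 * valid ownership byte sits in the second half of the entry, hence the
 * tcqe adjustment below.
 */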
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

        return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

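/*
 * Allocate a kernel-owned CQ buffer: an HCA buffer sized for nent
 * entries, an MTT describing it, and the MTT entries written so the
 * HCA can DMA completions into it.
 */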
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
                             PAGE_SIZE * 2, &buf->buf);

        if (err)
                goto out;

        buf->entry_size = dev->dev->caps.cqe_size;
        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                            &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
        return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

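/*
 * Pin and map a userspace CQ buffer: take an ib_umem reference on the
 * user memory, choose an MTT page size for it, and write the MTT
 * entries so the HCA can access the buffer.
 */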
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
                               struct mlx4_ib_cq_buf *buf,
                               struct ib_umem **umem, u64 buf_addr, int cqe)
{
        int err;
        int cqe_size = dev->dev->caps.cqe_size;
        int shift;
        int n;

        *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
                            IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
        err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);

        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}

#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
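/*
 * Create a CQ.  For user CQs the buffer and doorbell record come from
 * userspace via udata and are pinned and mapped here; for kernel CQs
 * they are allocated by the driver.  In both cases the CQ is then
 * registered with the firmware through mlx4_cq_alloc().
 */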
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_uar *uar;
        void *buf_addr;
        int err;
        struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx4_ib_ucontext, ibucontext);

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return -EINVAL;

        if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
                return -EINVAL;

        entries = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        cq->create_flags = attr->flags;
        INIT_LIST_HEAD(&cq->send_qp_list);
        INIT_LIST_HEAD(&cq->recv_qp_list);

        if (udata) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                buf_addr = (void *)(unsigned long)ucmd.buf_addr;
                err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
                if (err)
                        goto err_mtt;

                uar = &context->uar;
                cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;

                cq->mcq.set_ci_db = cq->db.db;
                cq->mcq.arm_db = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                buf_addr = &cq->buf.buf;

                uar = &dev->priv_uar;
                cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
        }

        if (dev->eq_table)
                vector = dev->eq_table[vector % ibdev->num_comp_vectors];

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
                            &cq->mcq, vector, 0,
                            !!(cq->create_flags &
                               IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
                            buf_addr, !!udata);
        if (err)
                goto err_dbmap;

        if (udata)
                cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
        else
                cq->mcq.comp = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (udata)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_cq_free;
                }

        return 0;

err_cq_free:
        mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
        if (udata)
                mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        ib_umem_release(cq->umem);
        if (!udata)
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!udata)
                mlx4_db_free(dev->dev, &cq->db);
err_cq:
        return err;
}

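/*
 * Allocate the kernel-side staging buffer used while resizing a CQ.
 * Only one resize may be in flight at a time, hence the -EBUSY check.
 */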
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem,
                                  ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i))
                ++i;

        return i - cq->mcq.cons_index;
}

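/*
 * Copy the CQEs software has not yet polled from the old buffer into
 * the resize buffer, fixing up each entry's ownership bit for its
 * position in the new ring.  Copying stops at the special RESIZE CQE
 * that hardware writes when it switches over to the new buffer.
 */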
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe, *new_cqe;
        int i;
        int cqe_size = cq->buf.entry_size;
        int cqe_inc = cqe_size == 64 ? 1 : 0;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        cqe += cqe_inc;

        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                           (i + 1) & cq->resize_buf->cqe);
                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
                new_cqe += cqe_inc;

                new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
                        (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
                cqe += cqe_inc;
        }
        ++cq->mcq.cons_index;
}

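/*
 * Resize a CQ.  User CQs get their new buffer from userspace; kernel
 * CQs allocate a resize buffer here and, once firmware has switched
 * over, copy any outstanding CQEs across under the CQ lock before
 * freeing the old buffer.
 */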
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_mtt mtt;
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);
        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = -EINVAL;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        mtt = cq->buf.mtt;

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        mlx4_mtt_cleanup(dev->dev, &mtt);
        if (ibcq->uobject) {
                cq->buf = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
                struct mlx4_ib_cq_buf tmp_buf;
                int tmp_cqe = 0;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        tmp_buf = cq->buf;
                        tmp_cqe = cq->ibcq.cqe;
                        cq->buf = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);

                if (tmp_cqe)
                        mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
        }

        goto out;

err_buf:
        mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        ib_umem_release(cq->resize_umem);
        cq->resize_umem = NULL;
out:
        mutex_unlock(&cq->resize_mutex);

        return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (udata) {
                mlx4_ib_db_unmap_user(
                        rdma_udata_to_drv_context(
                                udata,
                                struct mlx4_ib_ucontext,
                                ibucontext),
                        &mcq->db);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }
        ib_umem_release(mcq->umem);
        return 0;
}

static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
                 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
                 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
                 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

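/*
 * Translate a hardware error CQE syndrome into the corresponding
 * ib_wc_status value and record the vendor error syndrome.
 */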
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                pr_debug("local QP operation err "
                         "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                         "opcode = %02x)\n",
                         be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                         cqe->vendor_err_syndrome,
                         cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
{
        return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
                ((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                 (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                       MLX4_CQE_STATUS_UDP)) &&
                 (checksum == cpu_to_be16(0xffff))));
}

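/*
 * On SR-IOV proxy QPs, received packets carry an mlx4_ib_proxy_sqp_hdr
 * built by the tunneling code.  Recover the original pkey index, source
 * QP, GRH flag and L2 addressing from that header so the work
 * completion reflects the real sender rather than the tunnel QP.
 */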
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
                            unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
        struct mlx4_ib_proxy_sqp_hdr *hdr;

        ib_dma_sync_single_for_cpu(qp->ibqp.device,
                                   qp->sqp_proxy_rcv[tail].map,
                                   sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                   DMA_FROM_DEVICE);
        hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
        wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
        wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
        wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
        wc->dlid_path_bits = 0;

        if (is_eth) {
                wc->slid = 0;
                wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
                memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
                memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
                wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
        } else {
                wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
                wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        }
}

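/*
 * When the device is in internal error state the hardware will never
 * complete outstanding work requests, so generate software
 * IB_WC_WR_FLUSH_ERR completions for every posted WQE on the QPs
 * attached to this CQ.
 */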
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
                               struct ib_wc *wc, int *npolled, int is_send)
{
        struct mlx4_ib_wq *wq;
        unsigned cur;
        int i;

        wq = is_send ? &qp->sq : &qp->rq;
        cur = wq->head - wq->tail;

        if (cur == 0)
                return;

        for (i = 0; i < cur && *npolled < num_entries; i++) {
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                (*npolled)++;
                wc->qp = &qp->ibqp;
                wc++;
        }
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
                                 struct ib_wc *wc, int *npolled)
{
        struct mlx4_ib_qp *qp;

        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return
         * simulated FLUSH_ERR completions
         */
        list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
                if (*npolled >= num_entries)
                        goto out;
        }

        list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
                if (*npolled >= num_entries)
                        goto out;
        }

out:
        return;
}

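/*
 * Poll one CQE: advance the consumer index, look up the QP (and SRQ,
 * if any) the completion belongs to, consume the matching WQE and fill
 * in the ib_wc.  Returns -EAGAIN when no software-owned CQE is
 * available, and handles in-progress resizes and error CQEs along the
 * way.
 */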
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        struct mlx4_srq *msrq = NULL;
        int is_send;
        int is_error;
        int is_eth;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;
        unsigned tail = 0;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        if (cq->buf.entry_size == 64)
                cqe++;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
                u32 srq_num;
                g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
                srq_num = g_mlpath_rqpn & 0xffffff;
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
        }

        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else if (msrq) {
                srq = to_mibsrq(msrq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq = &(*cur_qp)->rq;
                tail = wq->tail & (wq->wqe_cnt - 1);
                wc->wr_id = wq->wrid[tail];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                        fallthrough;
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                        fallthrough;
                case MLX4_OPCODE_SEND:
                case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode = IB_WC_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode = IB_WC_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_CS:
                        wc->opcode = IB_WC_MASKED_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_FA:
                        wc->opcode = IB_WC_MASKED_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode = IB_WC_LSO;
                        break;
                case MLX4_OPCODE_FMR:
                        wc->opcode = IB_WC_REG_MR;
                        break;
                case MLX4_OPCODE_LOCAL_INVAL:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND_INVAL:
                        wc->opcode = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
                        wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }

                is_eth = (rdma_port_get_link_layer(wc->qp->device,
                                                   (*cur_qp)->port) ==
                          IB_LINK_LAYER_ETHERNET);
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
                             MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
                                use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
                                                is_eth);
                                return 0;
                        }
                }

                g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                                      cqe->badfcs_enc,
                                                      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
                if (is_eth) {
                        wc->slid = 0;
                        wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
                            MLX4_CQE_CVLAN_PRESENT_MASK) {
                                wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
                                        MLX4_CQE_VID_MASK;
                        } else {
                                wc->vlan_id = 0xffff;
                        }
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
                        wc->slid = be16_to_cpu(cqe->rlid);
                        wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
        }

        return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
                goto out;
        }

        for (npolled = 0; npolled < num_entries; ++npolled) {
                if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }

        mlx4_cq_set_ci(&cq->mcq);

out:
        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}

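/*
 * Request a completion notification: ring the CQ arm doorbell so the
 * HCA raises an event for the next solicited (or any) completion,
 * depending on the requested flags.
 */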
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}

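/*
 * Remove all CQEs belonging to the given QP (which is already in
 * RESET), compacting the remaining entries.  The caller must hold the
 * CQ lock; mlx4_ib_cq_clean() below is the locked wrapper.
 */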
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;
        int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe += cqe_inc;

                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest += cqe_inc;

                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}