// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"
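/* qed_bmap is a small ID allocator built on a kernel bitmap; the name is
 * kept purely for diagnostics. Allocation uses the non-atomic
 * find_first_zero_bit()/__set_bit() pair, so callers serialize through
 * p_rdma_info->lock. An illustrative sketch of the pattern used by the
 * PD/DPI/CQ allocators below:
 *
 *	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->pd_map, &id);
 *	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 */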
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}

int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info;

	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return -ENOMEM;

	spin_lock_init(&p_rdma_info->lock);

	p_hwfn->p_rdma_info = p_rdma_info;
	return 0;
}

void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = NULL;
}

static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		return rc;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate bit map for XRC Domains */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				 QED_RDMA_MAX_XRCDS, "XRCD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrcd_map, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_xrcd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded by
	 * the number of connections we support (num_cons == num_qps in
	 * iWARP, num_cons == 2 * num_qps in RoCE).
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * Size needs to equal the size of the cq bmap.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start at an offset equal to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn,
				 &p_rdma_info->xrc_srq_map,
				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_xrc_srq_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_srq_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_xrc_srq_map:
	kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_xrcd_map:
	kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);

	return rc;
}

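/* On teardown, optionally report any IDs that were never released. The
 * dump below walks the bitmap as u64 words, printing 8 words (512 bits)
 * per line, so a leaked ID can be located as line * 512 + bit offset.
 */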
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	int last_line = bmap->max_count / (64 * 8);
	int last_item = last_line * 8 +
	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
	u64 *pmap = (u64 *)bmap->bitmap;
	int line, item, offset;
	u8 str_last_line[200] = { 0 };

	if (!weight || !check)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8)
		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line,
				  pmap[item],
				  pmap[item + 1],
				  pmap[item + 2],
				  pmap[item + 3],
				  pmap[item + 4],
				  pmap[item + 5],
				  pmap[item + 6], pmap[item + 7]);

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (bitmap_weight((unsigned long *)&pmap[item],
			   bmap->max_count - item * 64))) {
		offset = sprintf(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++)
			offset += sprintf(str_last_line + offset,
					  "0x%016llx ", pmap[item]);
		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
	}

end:
	kfree(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_free_reserved_lkey(p_hwfn);
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}

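/* Build a node GUID from the port MAC using the modified EUI-64
 * construction: flip the universal/local bit of the first octet and
 * splice 0xff, 0xfe into the middle. For an illustrative MAC of
 * 00:0e:1e:aa:bb:cc this yields the GUID 02:0e:1e:ff:fe:aa:bb:cc.
 */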
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = cdev->chip_rev;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->cdev->rdma_max_srq_sge) {
		dev->max_srq_sge = min_t(u32,
					 p_hwfn->cdev->rdma_max_srq_sge,
					 dev->max_srq_sge);
	}
	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use, hence they are limited by
	 * the number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported. The max number of CQEs
	 * assumes a two-level PBL: 8 is the pointer size in bytes and 32 is
	 * the size of a CQ element in bytes.
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
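	/* With a two-level PBL, one page of 8-byte pointers addresses
	 * PAGE_SIZE / 8 leaf pages, each again holding PAGE_SIZE / 8
	 * pointers to data pages. With 4 KiB pages, for example, that is
	 * 512 * 512 = 262144 data pages, i.e. a 1 GiB region.
	 */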
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
				   &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}

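/* Post the RDMA function-init ramrod. The ramrod carries the CNQ layout
 * (one PBL and status-block binding per requested CNQ, taken from
 * params->cnq_pbl_list) plus the first regular-SRQ ID (srq_id_offset),
 * as computed in qed_rdma_alloc().
 */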
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
	}

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;
	p_params_header->first_reg_srq_id =
	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
	p_params_header->reg_srq_base_addr =
	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

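/* Reserve a TID: claim an itid from the bitmap, then make sure the ILT
 * task page backing that itid is actually allocated (the task array is
 * populated on demand via qed_cxt_dynamic_ilt_alloc()).
 */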
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;
	p_hwfn->p_rdma_info->active = 0;
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
			       out_params->dpi * p_hwfn->dpi_size;

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
	struct qed_mcp_link_state *p_link_output;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* The link state is saved only for the leading hwfn */
	p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
	    : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_link_output->speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

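/* Ring a CNQ "doorbell": write the new producer value into the USTORM
 * queue-zone RAM slot for this CNQ through the GTT-mapped BAR0 window.
 * The wmb() keeps successive producer updates ordered.
 */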
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->xrcd_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate xrcd id\n");
		return rc;
	}

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* Toggle the bit associated with the given icid and return the
	 * new toggle bit's value.
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;
	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

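/* Pack a 6-byte MAC into the three __le16 words the FW expects; each
 * word carries two octets with the lower-index octet in the high byte,
 * so aa:bb:cc:dd:ee:ff (an illustrative address) becomes the words
 * 0xaabb, 0xccdd, 0xeeff before the little-endian conversion.
 */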
void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params ||
	    !p_hwfn->p_rdma_info->active) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc) {
		kfree(qp);
		return NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

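/* Apply a modify-QP request: each field is copied into the qp struct
 * only when the corresponding bit in params->modify_flags is set, and
 * the accumulated state is then handed to the RoCE or iWARP backend to
 * emit the actual firmware transition.
 */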
static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	switch (qp->qp_type) {
	case QED_RDMA_QP_TYPE_XRC_INI:
		qp->has_req = 1;
		break;
	case QED_RDMA_QP_TYPE_XRC_TGT:
		qp->has_resp = 1;
		break;
	default:
		qp->has_req = 1;
		qp->has_resp = 1;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);

		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	u16 flags = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
		  params->zbva);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
		  params->local_read);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	p_ramrod = &p_ent->ramrod.rdma_register_tid;
	p_ramrod->flags = cpu_to_le16(flags);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
		  params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW:
		tid_type = RDMA_TID_MW;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
		  tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

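/* Deregister a TID. If the FW answers RDMA_RETURN_NIG_DRAIN_REQ the TID
 * is still in use on the wire; the driver then drains the NIG via the
 * MCP and resends the same deregister ramrod once.
 */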
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN(cdev);
}

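/* Regular SRQs and XRC SRQs share one firmware ID space: XRC SRQ IDs
 * occupy [0, xrc_srq_count) and regular SRQ IDs start at srq_id_offset
 * (== xrc_srq_count), which is why create/destroy below add or subtract
 * the offset when mapping an SRQ ID to a bitmap slot.
 */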
1709 static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
1710 					      bool is_xrc)
1711 {
1712 	if (is_xrc)
1713 		return &p_hwfn->p_rdma_info->xrc_srq_map;
1714 
1715 	return &p_hwfn->p_rdma_info->srq_map;
1716 }
1717 
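/* Update the WQE limit of an existing SRQ via the MODIFY_SRQ ramrod. */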
1718 static int qed_rdma_modify_srq(void *rdma_cxt,
1719 			       struct qed_rdma_modify_srq_in_params *in_params)
1720 {
1721 	struct rdma_srq_modify_ramrod_data *p_ramrod;
1722 	struct qed_sp_init_data init_data = {};
1723 	struct qed_hwfn *p_hwfn = rdma_cxt;
1724 	struct qed_spq_entry *p_ent;
1725 	u16 opaque_fid;
1726 	int rc;
1727 
1728 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1729 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1730 
1731 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1732 				 RDMA_RAMROD_MODIFY_SRQ,
1733 				 p_hwfn->p_rdma_info->proto, &init_data);
1734 	if (rc)
1735 		return rc;
1736 
1737 	p_ramrod = &p_ent->ramrod.rdma_modify_srq;
1738 	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1739 	opaque_fid = p_hwfn->hw_info.opaque_fid;
1740 	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1741 	p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
1742 
1743 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1744 	if (rc)
1745 		return rc;
1746 
1747 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
1748 		   in_params->srq_id, in_params->is_xrc);
1749 
1750 	return rc;
1751 }
1752 
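/* Destroy an SRQ and return its id to the matching bitmap. Regular SRQ
 * ids handed to the caller are biased by srq_id_offset, so the offset
 * is subtracted before releasing the bitmap id; XRC SRQ ids are not
 * biased.
 */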
1753 static int
1754 qed_rdma_destroy_srq(void *rdma_cxt,
1755 		     struct qed_rdma_destroy_srq_in_params *in_params)
1756 {
1757 	struct rdma_srq_destroy_ramrod_data *p_ramrod;
1758 	struct qed_sp_init_data init_data = {};
1759 	struct qed_hwfn *p_hwfn = rdma_cxt;
1760 	struct qed_spq_entry *p_ent;
1761 	struct qed_bmap *bmap;
1762 	u16 opaque_fid;
1763 	u16 offset;
1764 	int rc;
1765 
1766 	opaque_fid = p_hwfn->hw_info.opaque_fid;
1767 
1768 	init_data.opaque_fid = opaque_fid;
1769 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1770 
1771 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1772 				 RDMA_RAMROD_DESTROY_SRQ,
1773 				 p_hwfn->p_rdma_info->proto, &init_data);
1774 	if (rc)
1775 		return rc;
1776 
1777 	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
1778 	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1779 	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1780 
1781 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1782 	if (rc)
1783 		return rc;
1784 
1785 	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
1786 	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
1787 
1788 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1789 	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
1790 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1791 
1792 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1793 		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
1794 		   in_params->srq_id, in_params->is_xrc);
1795 
1796 	return rc;
1797 }
1798 
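/* Create an SRQ: allocate an id from the matching bitmap, back it with
 * a dynamically allocated ILT line, then post the CREATE_SRQ ramrod.
 * For a regular SRQ the id reported back is the bitmap id plus
 * srq_id_offset (e.g. bitmap id 5 becomes 5 + srq_id_offset), keeping
 * the regular and XRC id spaces disjoint. On failure the bitmap id is
 * released again.
 */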
1799 static int
1800 qed_rdma_create_srq(void *rdma_cxt,
1801 		    struct qed_rdma_create_srq_in_params *in_params,
1802 		    struct qed_rdma_create_srq_out_params *out_params)
1803 {
1804 	struct rdma_srq_create_ramrod_data *p_ramrod;
1805 	struct qed_sp_init_data init_data = {};
1806 	struct qed_hwfn *p_hwfn = rdma_cxt;
1807 	enum qed_cxt_elem_type elem_type;
1808 	struct qed_spq_entry *p_ent;
1809 	u16 opaque_fid, srq_id;
1810 	struct qed_bmap *bmap;
1811 	u32 returned_id;
1812 	u16 offset;
1813 	int rc;
1814 
1815 	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
1816 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1817 	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
1818 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1819 
1820 	if (rc) {
1821 		DP_NOTICE(p_hwfn,
1822 			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
1823 			  in_params->is_xrc);
1824 		return rc;
1825 	}
1826 
1827 	elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
1828 	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
1829 	if (rc)
1830 		goto err;
1831 
	opaque_fid = p_hwfn->hw_info.opaque_fid;
1835 	init_data.opaque_fid = opaque_fid;
1836 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1837 
1838 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1839 				 RDMA_RAMROD_CREATE_SRQ,
1840 				 p_hwfn->p_rdma_info->proto, &init_data);
1841 	if (rc)
1842 		goto err;
1843 
1844 	p_ramrod = &p_ent->ramrod.rdma_create_srq;
1845 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
1846 	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
1847 	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
1848 	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1849 	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
1850 	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
1851 	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
1852 	srq_id = (u16)returned_id + offset;
1853 	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
1854 
1855 	if (in_params->is_xrc) {
1856 		SET_FIELD(p_ramrod->flags,
1857 			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
1858 		SET_FIELD(p_ramrod->flags,
1859 			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
1860 			  in_params->reserved_key_en);
1861 		p_ramrod->xrc_srq_cq_cid =
1862 			cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1863 				     in_params->cq_cid);
1864 		p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
1865 	}
1866 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1867 	if (rc)
1868 		goto err;
1869 
1870 	out_params->srq_id = srq_id;
1871 
1872 	DP_VERBOSE(p_hwfn,
1873 		   QED_MSG_RDMA,
1874 		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
1875 		   out_params->srq_id, in_params->is_xrc);
1876 	return rc;
1877 
1878 err:
1879 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1880 	qed_bmap_release_id(p_hwfn, bmap, returned_id);
1881 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1882 
1883 	return rc;
1884 }
1885 
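/* Report whether any QP CIDs are still allocated, e.g. so teardown
 * flows can tell whether live QPs remain.
 */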
1886 bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
1887 {
1888 	bool result;
1889 
	/* If RDMA wasn't activated yet, there are naturally no QPs */
1891 	if (!p_hwfn->p_rdma_info->active)
1892 		return false;
1893 
1894 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1895 	if (!p_hwfn->p_rdma_info->cid_map.bitmap)
1896 		result = false;
1897 	else
1898 		result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
1899 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1900 	return result;
1901 }
1902 
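/* (E)DPM may only be enabled if neither DCBX nor the doorbell BAR have
 * disqualified it; program the DORQ accordingly.
 */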
1903 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1904 {
1905 	u32 val;
1906 
1907 	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
1908 
1909 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
1910 	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
1911 		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
1912 		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

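/* Called when the doorbell BAR cannot support EDPM; latch that
 * decision and disable DPM for this PF.
 */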
1916 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1917 {
1918 	p_hwfn->db_bar_no_edpm = true;
1919 
1920 	qed_rdma_dpm_conf(p_hwfn, p_ptt);
1921 }
1922 
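/* Bring up RDMA on a hwfn: allocate the RDMA info structures and run
 * the setup flow, unwinding in reverse order on failure. The function
 * is marked active only once both steps have succeeded.
 */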
1923 static int qed_rdma_start(void *rdma_cxt,
1924 			  struct qed_rdma_start_in_params *params)
1925 {
1926 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1927 	struct qed_ptt *p_ptt;
1928 	int rc = -EBUSY;
1929 
1930 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1931 		   "desired_cnq = %08x\n", params->desired_cnq);
1932 
1933 	p_ptt = qed_ptt_acquire(p_hwfn);
1934 	if (!p_ptt)
1935 		goto err;
1936 
1937 	rc = qed_rdma_alloc(p_hwfn);
1938 	if (rc)
1939 		goto err1;
1940 
1941 	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
1942 	if (rc)
1943 		goto err2;
1944 
1945 	qed_ptt_release(p_hwfn, p_ptt);
1946 	p_hwfn->p_rdma_info->active = 1;
1947 
1948 	return rc;
1949 
1950 err2:
1951 	qed_rdma_free(p_hwfn);
1952 err1:
1953 	qed_ptt_release(p_hwfn, p_ptt);
1954 err:
1955 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
1956 	return rc;
1957 }
1958 
1959 static int qed_rdma_init(struct qed_dev *cdev,
1960 			 struct qed_rdma_start_in_params *params)
1961 {
1962 	return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
1963 }
1964 
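/* Release a user's DPI back to the bitmap. */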
1965 static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
1966 {
1967 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1968 
1969 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
1970 
1971 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1972 	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
1973 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1974 }
1975 
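/* Swap the LLH MAC filter used for RoCE LL2 traffic. A failure to
 * remove the old filter is not propagated; rc only reflects the
 * attempt to add the new one.
 */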
1976 static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
1977 				       u8 *old_mac_address,
1978 				       u8 *new_mac_address)
1979 {
1980 	int rc = 0;
1981 
1982 	if (old_mac_address)
1983 		qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
1984 	if (new_mac_address)
1985 		rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
1986 
1987 	if (rc)
1988 		DP_ERR(cdev,
1989 		       "qed roce ll2 mac filter set: failed to add MAC filter\n");
1990 
1991 	return rc;
1992 }
1993 
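/* In iWARP CMT mode, steer non-RoCE packets either to the engine
 * matching the L2 affinity hint or, on reset, back to both engines.
 */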
1994 static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
1995 {
1996 	enum qed_eng eng;
1997 	u8 ppfid = 0;
1998 	int rc;
1999 
	/* Make sure iWARP CMT mode is enabled before setting the affinity */
2001 	if (!cdev->iwarp_cmt)
2002 		return -EINVAL;
2003 
2004 	if (b_reset)
2005 		eng = QED_BOTH_ENG;
2006 	else
2007 		eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;
2008 
2009 	rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
2010 	if (rc) {
2011 		DP_NOTICE(cdev,
2012 			  "Failed to set the engine affinity of ppfid %d\n",
2013 			  ppfid);
2014 		return rc;
2015 	}
2016 
2017 	DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
2018 		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
2019 		   eng);
2020 
2021 	return 0;
2022 }
2023 
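/* Dispatch table handed to the upper-layer RDMA driver through
 * qed_get_rdma_ops().
 */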
2024 static const struct qed_rdma_ops qed_rdma_ops_pass = {
2025 	.common = &qed_common_ops_pass,
2026 	.fill_dev_info = &qed_fill_rdma_dev_info,
2027 	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2028 	.rdma_init = &qed_rdma_init,
2029 	.rdma_add_user = &qed_rdma_add_user,
2030 	.rdma_remove_user = &qed_rdma_remove_user,
2031 	.rdma_stop = &qed_rdma_stop,
2032 	.rdma_query_port = &qed_rdma_query_port,
2033 	.rdma_query_device = &qed_rdma_query_device,
2034 	.rdma_get_start_sb = &qed_rdma_get_sb_start,
2035 	.rdma_get_rdma_int = &qed_rdma_get_int,
2036 	.rdma_set_rdma_int = &qed_rdma_set_int,
2037 	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2038 	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
2039 	.rdma_alloc_pd = &qed_rdma_alloc_pd,
2040 	.rdma_dealloc_pd = &qed_rdma_free_pd,
2041 	.rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
2042 	.rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
2043 	.rdma_create_cq = &qed_rdma_create_cq,
2044 	.rdma_destroy_cq = &qed_rdma_destroy_cq,
2045 	.rdma_create_qp = &qed_rdma_create_qp,
2046 	.rdma_modify_qp = &qed_rdma_modify_qp,
2047 	.rdma_query_qp = &qed_rdma_query_qp,
2048 	.rdma_destroy_qp = &qed_rdma_destroy_qp,
2049 	.rdma_alloc_tid = &qed_rdma_alloc_tid,
2050 	.rdma_free_tid = &qed_rdma_free_tid,
2051 	.rdma_register_tid = &qed_rdma_register_tid,
2052 	.rdma_deregister_tid = &qed_rdma_deregister_tid,
2053 	.rdma_create_srq = &qed_rdma_create_srq,
2054 	.rdma_modify_srq = &qed_rdma_modify_srq,
2055 	.rdma_destroy_srq = &qed_rdma_destroy_srq,
2056 	.ll2_acquire_connection = &qed_ll2_acquire_connection,
2057 	.ll2_establish_connection = &qed_ll2_establish_connection,
2058 	.ll2_terminate_connection = &qed_ll2_terminate_connection,
2059 	.ll2_release_connection = &qed_ll2_release_connection,
2060 	.ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
2061 	.ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
2062 	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
2063 	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2064 	.ll2_get_stats = &qed_ll2_get_stats,
2065 	.iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
2066 	.iwarp_connect = &qed_iwarp_connect,
2067 	.iwarp_create_listen = &qed_iwarp_create_listen,
2068 	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
2069 	.iwarp_accept = &qed_iwarp_accept,
2070 	.iwarp_reject = &qed_iwarp_reject,
2071 	.iwarp_send_rtr = &qed_iwarp_send_rtr,
2072 };
2073 
2074 const struct qed_rdma_ops *qed_get_rdma_ops(void)
2075 {
2076 	return &qed_rdma_ops_pass;
2077 }
2078 EXPORT_SYMBOL(qed_get_rdma_ops);
2079