/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
						   pbl->pg_arr[i] &
						  PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "QPLIB: PBL free pg_arr[%d] empty?!\n",
					 i);
			pbl->pg_arr[i] = NULL;
		}
	}
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
	struct scatterlist *sg;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
							     pbl->pg_size,
							     &pbl->pg_map_arr[i],
							     GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		for_each_sg(sghead, sg, pages, i) {
			pbl->pg_map_arr[i] = sg_dma_address(sg);
			pbl->pg_arr[i] = sg_virt(sg);
			if (!pbl->pg_arr[i])
				goto fail;

			pbl->pg_count++;
		}
	}

	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i <= hwq->level; i++) {
		if (i == hwq->level)
			__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(pdev, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
			      struct scatterlist *sghead, int nmap,
			      u32 *elements, u32 element_size, u32 aux,
			      u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
	u32 pages, slots, size, aux_pages = 0, aux_size = 0;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	int i, rc;

	hwq->level = PBL_LVL_MAX;

	slots = roundup_pow_of_two(*elements);
	if (aux) {
		aux_size = roundup_pow_of_two(aux);
		aux_pages = (slots * aux_size) / pg_size;
		if ((slots * aux_size) % pg_size)
			aux_pages++;
	}
	size = roundup_pow_of_two(element_size);

	if (!sghead) {
		hwq->is_user = false;
		pages = (slots * size) / pg_size + aux_pages;
		if ((slots * size) % pg_size)
			pages++;
		if (!pages)
			return -EINVAL;
	} else {
		hwq->is_user = true;
		pages = nmap;
	}

	/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
	if (sghead && (pages == MAX_PBL_LVL_0_PGS))
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
				 pages, pg_size);
	else
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
	if (rc)
		goto fail;

	hwq->level = PBL_LVL_0;

	if (pages > MAX_PBL_LVL_0_PGS) {
		if (pages > MAX_PBL_LVL_1_PGS) {
			/* 2 levels of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
					 MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PDE_VALID;
			hwq->level = PBL_LVL_1;

			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;

			/* Fill in lvl1 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Mark the last two pages of the queue */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_2;
		} else {
			u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
						PTU_PTE_VALID;

			/* 1 level of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Mark the last two pages of the queue */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_1;
		}
	}
	hwq->pdev = pdev;
	spin_lock_init(&hwq->lock);
	hwq->prod = 0;
	hwq->cons = 0;
	*elements = hwq->max_elements = slots;
	hwq->element_size = size;

	/* For direct access to the elements */
	hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

	return 0;

fail:
	bnxt_qplib_free_hwq(pdev, hwq);
	return -ENOMEM;
}
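
/*
 * Worked sizing example (illustrative only, assuming 4K pages and the
 * level limits from qplib_res.h: MAX_PBL_LVL_0_PGS = 1 and
 * MAX_PBL_LVL_1_PGS = 512): a kernel queue requested with
 * *elements = 60000 and element_size = 128 rounds up to slots = 65536
 * and size = 128, i.e. 65536 * 128 / 4096 = 2048 pages. Since
 * 2048 > 512, two levels of indirection are built: the single
 * PBL_LVL_0 page points at the PBL_LVL_1 pages, whose entries in turn
 * point at the 2048 PBL_LVL_2 data pages.
 */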

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
	bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
	bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip firmware.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Page Buffer List or a one- or two-level indirection Page Directory
 *     List plus PBL is built (see bnxt_qplib_alloc_init_hwq()):
 *             table fits in 1 page    - 0 levels of indirection
 *             up to 512 pages         - 1 level of indirection
 *             more than 512 pages     - 2 levels of indirection
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn)
{
	int i, j, k, rc = 0;
	int fnz_idx = -1;
	__le64 **pbl_ptr;

	if (virt_fn)
		goto stats_alloc;

	/* QPC Tables */
	ctx->qpc_tbl.max_elements = ctx->qpc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
				       &ctx->qpc_tbl.max_elements,
				       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* MRW Tables */
	ctx->mrw_tbl.max_elements = ctx->mrw_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
				       &ctx->mrw_tbl.max_elements,
				       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* SRQ Tables */
	ctx->srqc_tbl.max_elements = ctx->srqc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
				       &ctx->srqc_tbl.max_elements,
				       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* CQ Tables */
	ctx->cq_tbl.max_elements = ctx->cq_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
				       &ctx->cq_tbl.max_elements,
				       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* TQM Buffer */
	ctx->tqm_pde.max_elements = 512;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
				       &ctx->tqm_pde.max_elements, sizeof(u64),
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!ctx->tqm_count[i])
			continue;
		ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
					       ctx->tqm_count[i];
		rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
					       &ctx->tqm_tbl[i].max_elements, 1,
					       0, PAGE_SIZE, HWQ_TYPE_CTX);
		if (rc)
			goto fail;
	}
	pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		if (!ctx->tqm_tbl[i].max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i;	/* first non-zero index */
		switch (ctx->tqm_tbl[i].level) {
		case PBL_LVL_2:
			for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
			     k++)
				pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
				  cpu_to_le64(
				    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
				    | PTU_PTE_VALID);
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
				ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
				PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
			     PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
				       &ctx->tim_tbl.max_elements, 1,
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(pdev, ctx);
	return rc;
}
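
/*
 * Caller sketch (hypothetical values, not taken from this driver): the
 * requested counts are filled in before calling bnxt_qplib_alloc_ctx(),
 * and on any failure the routine unwinds its own allocations, so the
 * caller needs no partial teardown:
 *
 *	struct bnxt_qplib_ctx ctx = {};
 *	int rc;
 *
 *	ctx.qpc_count = 0x10000;
 *	ctx.mrw_count = 0x40000;
 *	ctx.srqc_count = 0x10000;
 *	ctx.cq_count = 0x10000;
 *	rc = bnxt_qplib_alloc_ctx(pdev, &ctx, false);
 */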

/* GUID */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}
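
/*
 * Example of the mapping above: MAC 00:10:18:01:02:03 yields GUID
 * 02:10:18:ff:fe:01:02:03 - 0xfffe is inserted between the OUI and the
 * NIC-specific bytes, and the "^ 2" flips the universal/local bit, per
 * the modified EUI-64 format of RFC 4291, Appendix A.
 */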

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = DIV_ROUND_UP(max, 8);
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* A set bit means "free"; start with every PD available */
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}
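
/*
 * The PD and DPI bitmaps use an inverted convention: a set bit means the
 * id is free, which is why the tables above start out memset to 0xFF and
 * find_first_bit() locates the next available id. A minimal caller
 * sketch (hypothetical, error handling elided):
 *
 *	struct bnxt_qplib_pd pd;
 *
 *	if (!bnxt_qplib_alloc_pd(&res->pd_tbl, &pd))
 *		... program pd.id into the hardware ...
 *	bnxt_qplib_dealloc_pd(res, &res->pd_tbl, &pd);
 */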

/* DPIs */
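/*
 * Each DPI (doorbell page index) owns one PAGE_SIZE window of the
 * doorbell BAR: dbr is the kernel-mapped address of that window and
 * umdbr the raw bus address that can be handed to user space for mmap.
 */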
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi     *dpi,
			 void                      *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi     *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32                       dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev,
			"QPLIB: BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %u\n",
			dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
						  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: FP: DBR BAR region %d mapping failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = DIV_ROUND_UP(dpit->max, 8);
	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	/* A set bit means "free"; start with every DPI available */
	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = sizeof(struct ctx_hw_stats);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

	res->netdev = NULL;
	res->pdev = NULL;
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}
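
/*
 * Lifecycle sketch (hypothetical caller, assuming dev_attr was already
 * queried from firmware): bnxt_qplib_alloc_res() reserves the tables,
 * bnxt_qplib_init_res() programs the defaults, and teardown is the
 * mirror image:
 *
 *	rc = bnxt_qplib_alloc_res(&res, pdev, netdev, &dev_attr);
 *	if (!rc)
 *		rc = bnxt_qplib_init_res(&res);
 *	...
 *	bnxt_qplib_cleanup_res(&res);
 *	bnxt_qplib_free_res(&res);
 */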