// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#define DRV_NAME	"octeontx2-af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
#define DRV_VERSION	"1.0"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);

/* Poll an RVU block's register 'offset', waiting for the bits
 * specified by 'mask' to become either all zero or nonzero,
 * as selected by 'zero'.
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(100);
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
	while (time_before(jiffies, timeout)) {
		reg_val = readq(reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1, 5);
	}
	return -EBUSY;
}
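
/* Illustrative usage (a sketch; see rvu_lf_reset() further down, which
 * does exactly this): trigger an LF reset and poll until HW clears the
 * reset bit:
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);
 */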

int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}
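
/* Illustrative usage (a sketch, mirroring rvu_set_msix_offset() below):
 * carve 'nvecs' contiguous vectors out of a func's MSIX bitmap, but
 * only after confirming a contiguous run exists:
 *
 *	if (rvu_rsrc_check_contig(&pfvf->msix, nvecs))
 *		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
 */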

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	spin_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				spin_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	spin_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
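
/* Illustrative usage (a sketch; the MSIX_OFFSET mbox handler below does
 * the same): translate a func's NPA slot 0 to its HW LF index:
 *
 *	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
 */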

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is an RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX0;
	}

	/* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT0;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e. %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for an RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->type) {
	case BLKTYPE_NPA:
		pfvf->npalf = attach;
		num_lfs = pfvf->npalf;
		break;
	case BLKTYPE_NIX:
		pfvf->nixlf = attach;
		num_lfs = pfvf->nixlf;
		break;
	case BLKTYPE_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKTYPE_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKTYPE_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKTYPE_CPT:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
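
/* Note on the 'pcifunc' layout, as inferred from the accessors here:
 * the PF number sits at RVU_PFVF_PF_SHIFT and the low
 * RVU_PFVF_FUNC_MASK bits hold 'VF index + 1', with zero meaning the
 * PF itself. For example, PF2/VF0 would be
 * (2 << RVU_PFVF_PF_SHIFT) | 1, so rvu_get_pf() returns 2 and
 * rvu_get_hwvf() maps VF0 onto PF2's first HWVF.
 */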

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	*numvfs = (cfg >> 12) & 0xFF;
	*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get the PF/VF to which this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors\n",
			 pf, vf - 1);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (nvecs >= min_vecs)
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;

	return 0;
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf  *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	/* Init NIX LF's bitmap */
	block = &hw->block[BLKADDR_NIX0];
	if (!block->implemented)
		goto sso;
	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = BLKADDR_NIX0;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

sso:
	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	/* SSOW LF (workslot) count lives in SSO's constants register;
	 * re-read it here so a skipped SSO init doesn't leave 'cfg' stale.
	 */
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	/* Init CPT LF's bitmap */
	block = &hw->block[BLKADDR_CPT0];
	if (!block->implemented)
		goto init;
	cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = BLKADDR_CPT0;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

init:
	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	spin_lock_init(&rvu->rsrc_lock);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map)
			return -ENOMEM;

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_npc_init(rvu);
	if (err)
		return err;

	err = rvu_npa_init(rvu);
	if (err)
		return err;

	err = rvu_nix_init(rvu);
	if (err)
		return err;

	return 0;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e. the AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
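
/* Illustrative usage (a sketch; the real call sites live in the NPA/NIX
 * init code, and the structure names below are assumptions based on
 * those blocks):
 *
 *	err = rvu_aq_alloc(rvu, &block->aq, qsize,
 *			   sizeof(struct npa_aq_inst_s),
 *			   sizeof(struct npa_aq_res_s));
 */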

static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
				  struct ready_msg_rsp *rsp)
{
	return 0;
}

/* Get current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return pfvf->sso;
	case BLKTYPE_SSOW:
		return pfvf->ssow;
	case BLKTYPE_TIM:
		return pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs;
	}
	return 0;
}

static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		cpu_relax();

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
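
/* Lookup register layout, as inferred from the accesses above: the
 * pcifunc is placed at bit 24 and the slot at bit 16, bit 13 triggers
 * the lookup and reads back as 'busy', bit 12 is 'LF valid', and bits
 * [11:0] hold the resolved LF number.
 */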

static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	spin_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for this func's LFs attached to any RVU block;
	 * if found, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	spin_unlock(&rvu->rsrc_lock);
	return 0;
}

static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
					     struct rsrc_detach *detach,
					     struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

static void rvu_attach_block(struct rvu *rvu, int pcifunc,
			     int blktype, int num_lfs)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int free_lfs, mappedlfs;

	/* Only one NPA LF can be attached */
	if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
		block = &hw->block[BLKADDR_NIX0];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		block = &hw->block[BLKADDR_CPT0];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}

static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
					     struct rsrc_attach *attach,
					     struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	spin_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees them as slots 0, 1, 2. So for a
		 * 'modify' request, simply detach all existing attached
		 * LFs/slots and attach them afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
	}

	if (attach->cptlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
	}

exit:
	spin_unlock(&rvu->rsrc_lock);
	return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}
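
/* Illustrative example (not part of the driver): if NPA LF 3 of a func
 * was handed vectors 8 and 9, then msix_lfmap[8] and msix_lfmap[9] both
 * hold MSIX_BLKLF(BLKADDR_NPA, 3), and rvu_get_msix_offset() returns 8.
 */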

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
	if (offset == MSIX_VECTOR_INVALID)
		return;	/* nothing was mapped for this LF */

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
					struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
	rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}
	return 0;
}

static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
				struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _req_type, _rsp_type)				\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&rvu->mbox, devid,				\
			sizeof(struct _rsp_type));			\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _name(rvu,			\
						 (struct _req_type *)req, \
						 rsp);			\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M
		break;
bad_message:
	default:
		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
				       req->id);
		return -ENODEV;
	}
}
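
/* For reference: each entry in the MBOX_MESSAGES list expands the 'M'
 * macro above into one switch case. E.g. the READY message (assuming
 * its list entry is M(READY, 0x001, msg_req, ready_msg_rsp)) becomes a
 * case that allocates a 'struct ready_msg_rsp', fills in its header and
 * dispatches to rvu_mbox_handler_READY().
 */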

static void rvu_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, id, err;
	u16 pf;

	mbox = &rvu->mbox;
	pf = mwork - rvu->mbox_wrk;
	mdev = &mbox->dev[pf];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < req_hdr->num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF sent this message based on mbox IRQ */
		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
		err = rvu_process_mbox_msg(rvu, pf, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
	}

	/* Send mbox responses to PF */
	otx2_mbox_msg_send(mbox, pf);
}

static void rvu_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, id;
	u16 pf;

	mbox = &rvu->mbox_up;
	pf = mwork - rvu->mbox_wrk_up;
	mdev = &mbox->dev[pf];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (rsp_hdr->num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < rsp_hdr->num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, 0);
}

static int rvu_mbox_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	void __iomem *hwbase = NULL;
	struct rvu_work *mwork;
	u64 bar4_addr;
	int err, pf;

	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				       hw->total_pfs);
	if (!rvu->mbox_wq)
		return -ENOMEM;

	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
				     sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
					sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	/* Map mbox region shared with PFs */
	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
	/* Mailbox is a reserved memory (in RAM) region shared between
	 * RVU devices; don't map it as device memory, so that unaligned
	 * accesses remain possible.
	 */
	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
	if (!hwbase) {
		dev_err(rvu->dev, "Unable to map mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
			     MBOX_DIR_AFPF, hw->total_pfs);
	if (err)
		goto exit;

	err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
			     MBOX_DIR_AFPF_UP, hw->total_pfs);
	if (err)
		goto exit;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		mwork = &rvu->mbox_wrk[pf];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, rvu_mbox_handler);
	}

	for (pf = 0; pf < hw->total_pfs; pf++) {
		mwork = &rvu->mbox_wrk_up[pf];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, rvu_mbox_up_handler);
	}

	return 0;
exit:
	if (hwbase)
		iounmap((void __iomem *)hwbase);
	destroy_workqueue(rvu->mbox_wq);
	return err;
}

static void rvu_mbox_destroy(struct rvu *rvu)
{
	if (rvu->mbox_wq) {
		flush_workqueue(rvu->mbox_wq);
		destroy_workqueue(rvu->mbox_wq);
		rvu->mbox_wq = NULL;
	}

	if (rvu->mbox.hwbase)
		iounmap((void __iomem *)rvu->mbox.hwbase);

	otx2_mbox_destroy(&rvu->mbox);
	otx2_mbox_destroy(&rvu->mbox_up);
}

static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 intr;
	u8  pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);

	/* Sync with mbox memory region */
	smp_wmb();

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			mbox = &rvu->mbox;
			mdev = &mbox->dev[pf];
			hdr = mdev->mbase + mbox->rx_start;
			if (hdr->num_msgs)
				queue_work(rvu->mbox_wq,
					   &rvu->mbox_wrk[pf].work);
			mbox = &rvu->mbox_up;
			mdev = &mbox->dev[pf];
			hdr = mdev->mbase + mbox->rx_start;
			if (hdr->num_msgs)
				queue_work(rvu->mbox_wq,
					   &rvu->mbox_wrk_up[pf].work);
		}
	}

	return IRQ_HANDLED;
}

static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e. AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq])
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	return 0;

fail:
	pci_free_irq_vectors(rvu->pdev);
	return ret;
}

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int    err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set DMA mask\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set consistent DMA mask\n");
		goto err_release_regions;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_release_regions;

	err = rvu_mbox_init(rvu);
	if (err)
		goto err_hwsetup;

	err = rvu_cgx_probe(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_cgx;

	return 0;
err_cgx:
	rvu_cgx_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(rvu);
err_hwsetup:
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_unregister_interrupts(rvu);
	rvu_cgx_wq_destroy(rvu);
	rvu_mbox_destroy(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err =  pci_register_driver(&rvu_driver);
	if (err < 0)
		pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);