1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/pci.h>
16 #include <linux/sysfs.h>
17 
18 #include "cgx.h"
19 #include "rvu.h"
20 #include "rvu_reg.h"
21 #include "ptp.h"
22 
23 #include "rvu_trace.h"
24 
25 #define DRV_NAME	"rvu_af"
26 #define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
27 
28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
29 
30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 				struct rvu_block *block, int lf);
32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33 				  struct rvu_block *block, int lf);
34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
35 
36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
37 			 int type, int num,
38 			 void (mbox_handler)(struct work_struct *),
39 			 void (mbox_up_handler)(struct work_struct *));
40 enum {
41 	TYPE_AFVF,
42 	TYPE_AFPF,
43 };
44 
45 /* Supported devices */
46 static const struct pci_device_id rvu_id_table[] = {
47 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
48 	{ 0, }  /* end of table */
49 };
50 
51 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
52 MODULE_DESCRIPTION(DRV_STRING);
53 MODULE_LICENSE("GPL v2");
54 MODULE_DEVICE_TABLE(pci, rvu_id_table);
55 
56 static char *mkex_profile; /* MKEX profile name */
57 module_param(mkex_profile, charp, 0000);
58 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
59 
60 static char *kpu_profile; /* KPU profile name */
61 module_param(kpu_profile, charp, 0000);
62 MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
63 
64 static void rvu_setup_hw_capabilities(struct rvu *rvu)
65 {
66 	struct rvu_hwinfo *hw = rvu->hw;
67 
68 	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
69 	hw->cap.nix_fixed_txschq_mapping = false;
70 	hw->cap.nix_shaping = true;
71 	hw->cap.nix_tx_link_bp = true;
72 	hw->cap.nix_rx_multicast = true;
73 	hw->cap.nix_shaper_toggle_wait = false;
74 	hw->rvu = rvu;
75 
76 	if (is_rvu_pre_96xx_C0(rvu)) {
77 		hw->cap.nix_fixed_txschq_mapping = true;
78 		hw->cap.nix_txsch_per_cgx_lmac = 4;
79 		hw->cap.nix_txsch_per_lbk_lmac = 132;
80 		hw->cap.nix_txsch_per_sdp_lmac = 76;
81 		hw->cap.nix_shaping = false;
82 		hw->cap.nix_tx_link_bp = false;
83 		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
84 			hw->cap.nix_rx_multicast = false;
85 	}
86 	if (!is_rvu_pre_96xx_C0(rvu))
87 		hw->cap.nix_shaper_toggle_wait = true;
88 
89 	if (!is_rvu_otx2(rvu))
90 		hw->cap.per_pf_mbox_regs = true;
91 }
92 
93 /* Poll an RVU block's register 'offset' until the bits specified by
94  * 'mask' read as 'zero' or 'nonzero', as requested.
95  */
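/* For example, rvu_lf_reset() below uses this to wait, within the 10ms
 * timeout, for an LF reset bit to self-clear:
 *   rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), true);
 */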
96 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
97 {
98 	unsigned long timeout = jiffies + usecs_to_jiffies(10000);
99 	void __iomem *reg;
100 	u64 reg_val;
101 
102 	reg = rvu->afreg_base + ((block << 28) | offset);
103 again:
104 	reg_val = readq(reg);
105 	if (zero && !(reg_val & mask))
106 		return 0;
107 	if (!zero && (reg_val & mask))
108 		return 0;
109 	if (time_before(jiffies, timeout)) {
110 		usleep_range(1, 5);
111 		goto again;
112 	}
113 	return -EBUSY;
114 }
115 
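/* Generic resource allocator helpers: each rsrc_bmap tracks up to 'max'
 * resources (LFs, MSIX vectors, etc) in a bitmap. rvu_alloc_rsrc() hands
 * out the first free index, while rvu_alloc_rsrc_contig() reserves 'nrsrc'
 * consecutive indices for resources HW expects to be contiguous, such as
 * an LF's MSIX vector range.
 */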
116 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
117 {
118 	int id;
119 
120 	if (!rsrc->bmap)
121 		return -EINVAL;
122 
123 	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
124 	if (id >= rsrc->max)
125 		return -ENOSPC;
126 
127 	__set_bit(id, rsrc->bmap);
128 
129 	return id;
130 }
131 
132 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
133 {
134 	int start;
135 
136 	if (!rsrc->bmap)
137 		return -EINVAL;
138 
139 	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
140 	if (start >= rsrc->max)
141 		return -ENOSPC;
142 
143 	bitmap_set(rsrc->bmap, start, nrsrc);
144 	return start;
145 }
146 
147 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
148 {
149 	if (!rsrc->bmap)
150 		return;
151 	if (start >= rsrc->max)
152 		return;
153 
154 	bitmap_clear(rsrc->bmap, start, nrsrc);
155 }
156 
157 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
158 {
159 	int start;
160 
161 	if (!rsrc->bmap)
162 		return false;
163 
164 	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
165 	if (start >= rsrc->max)
166 		return false;
167 
168 	return true;
169 }
170 
171 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
172 {
173 	if (!rsrc->bmap)
174 		return;
175 
176 	__clear_bit(id, rsrc->bmap);
177 }
178 
179 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
180 {
181 	int used;
182 
183 	if (!rsrc->bmap)
184 		return 0;
185 
186 	used = bitmap_weight(rsrc->bmap, rsrc->max);
187 	return (rsrc->max - used);
188 }
189 
190 bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
191 {
192 	if (!rsrc->bmap)
193 		return false;
194 
195 	return !test_bit(id, rsrc->bmap);
196 }
197 
198 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
199 {
200 	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
201 			     sizeof(long), GFP_KERNEL);
202 	if (!rsrc->bmap)
203 		return -ENOMEM;
204 	return 0;
205 }
206 
207 /* Get block LF's HW index from a PF_FUNC's block slot number */
208 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
209 {
210 	u16 match = 0;
211 	int lf;
212 
213 	mutex_lock(&rvu->rsrc_lock);
214 	for (lf = 0; lf < block->lf.max; lf++) {
215 		if (block->fn_map[lf] == pcifunc) {
216 			if (slot == match) {
217 				mutex_unlock(&rvu->rsrc_lock);
218 				return lf;
219 			}
220 			match++;
221 		}
222 	}
223 	mutex_unlock(&rvu->rsrc_lock);
224 	return -ENODEV;
225 }
226 
227 /* Convert a BLOCK_TYPE_E to a BLOCK_ADDR_E.
228  * Some silicon variants of OcteonTX2 support
229  * multiple blocks of the same type.
230  *
231  * @pcifunc has to be zero when no LF is yet attached.
232  *
233  * If a pcifunc has LFs attached from multiple blocks of the same type,
234  * the blkaddr of the first such block found is returned.
235  */
236 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
237 {
238 	int devnum, blkaddr = -ENODEV;
239 	u64 cfg, reg;
240 	bool is_pf;
241 
242 	switch (blktype) {
243 	case BLKTYPE_NPC:
244 		blkaddr = BLKADDR_NPC;
245 		goto exit;
246 	case BLKTYPE_NPA:
247 		blkaddr = BLKADDR_NPA;
248 		goto exit;
249 	case BLKTYPE_NIX:
250 		/* For now assume NIX0 */
251 		if (!pcifunc) {
252 			blkaddr = BLKADDR_NIX0;
253 			goto exit;
254 		}
255 		break;
256 	case BLKTYPE_SSO:
257 		blkaddr = BLKADDR_SSO;
258 		goto exit;
259 	case BLKTYPE_SSOW:
260 		blkaddr = BLKADDR_SSOW;
261 		goto exit;
262 	case BLKTYPE_TIM:
263 		blkaddr = BLKADDR_TIM;
264 		goto exit;
265 	case BLKTYPE_CPT:
266 		/* For now assume CPT0 */
267 		if (!pcifunc) {
268 			blkaddr = BLKADDR_CPT0;
269 			goto exit;
270 		}
271 		break;
272 	}
273 
274 	/* Check if this is an RVU PF or VF */
275 	if (pcifunc & RVU_PFVF_FUNC_MASK) {
276 		is_pf = false;
277 		devnum = rvu_get_hwvf(rvu, pcifunc);
278 	} else {
279 		is_pf = true;
280 		devnum = rvu_get_pf(pcifunc);
281 	}
282 
283 	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
284 	 * 'BLKADDR_NIX1'.
285 	 */
286 	if (blktype == BLKTYPE_NIX) {
287 		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
288 			RVU_PRIV_HWVFX_NIXX_CFG(0);
289 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
290 		if (cfg) {
291 			blkaddr = BLKADDR_NIX0;
292 			goto exit;
293 		}
294 
295 		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
296 			RVU_PRIV_HWVFX_NIXX_CFG(1);
297 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
298 		if (cfg)
299 			blkaddr = BLKADDR_NIX1;
300 	}
301 
302 	if (blktype == BLKTYPE_CPT) {
303 		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
304 			RVU_PRIV_HWVFX_CPTX_CFG(0);
305 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
306 		if (cfg) {
307 			blkaddr = BLKADDR_CPT0;
308 			goto exit;
309 		}
310 
311 		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
312 			RVU_PRIV_HWVFX_CPTX_CFG(1);
313 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
314 		if (cfg)
315 			blkaddr = BLKADDR_CPT1;
316 	}
317 
318 exit:
319 	if (is_block_implemented(rvu->hw, blkaddr))
320 		return blkaddr;
321 	return -ENODEV;
322 }
323 
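/* Update the SW view of which pcifunc owns a block LF and mirror the
 * resulting per PF/VF LF count into the block's PF or VF LF count register
 * whenever an LF is attached or detached.
 */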
324 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
325 				struct rvu_block *block, u16 pcifunc,
326 				u16 lf, bool attach)
327 {
328 	int devnum, num_lfs = 0;
329 	bool is_pf;
330 	u64 reg;
331 
332 	if (lf >= block->lf.max) {
333 		dev_err(&rvu->pdev->dev,
334 			"%s: FATAL: LF %d is >= %s's max lfs i.e. %d\n",
335 			__func__, lf, block->name, block->lf.max);
336 		return;
337 	}
338 
339 	/* Check if this is for an RVU PF or VF */
340 	if (pcifunc & RVU_PFVF_FUNC_MASK) {
341 		is_pf = false;
342 		devnum = rvu_get_hwvf(rvu, pcifunc);
343 	} else {
344 		is_pf = true;
345 		devnum = rvu_get_pf(pcifunc);
346 	}
347 
348 	block->fn_map[lf] = attach ? pcifunc : 0;
349 
350 	switch (block->addr) {
351 	case BLKADDR_NPA:
352 		pfvf->npalf = attach ? true : false;
353 		num_lfs = pfvf->npalf;
354 		break;
355 	case BLKADDR_NIX0:
356 	case BLKADDR_NIX1:
357 		pfvf->nixlf = attach ? true : false;
358 		num_lfs = pfvf->nixlf;
359 		break;
360 	case BLKADDR_SSO:
361 		attach ? pfvf->sso++ : pfvf->sso--;
362 		num_lfs = pfvf->sso;
363 		break;
364 	case BLKADDR_SSOW:
365 		attach ? pfvf->ssow++ : pfvf->ssow--;
366 		num_lfs = pfvf->ssow;
367 		break;
368 	case BLKADDR_TIM:
369 		attach ? pfvf->timlfs++ : pfvf->timlfs--;
370 		num_lfs = pfvf->timlfs;
371 		break;
372 	case BLKADDR_CPT0:
373 		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
374 		num_lfs = pfvf->cptlfs;
375 		break;
376 	case BLKADDR_CPT1:
377 		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
378 		num_lfs = pfvf->cpt1_lfs;
379 		break;
380 	}
381 
382 	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
383 	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
384 }
385 
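/* A 'pcifunc' identifies an RVU PF or VF: the FUNC bits
 * (RVU_PFVF_FUNC_MASK) are zero for a PF itself and 'VF index + 1' for one
 * of its VFs, with the PF number placed above them at RVU_PFVF_PF_SHIFT.
 */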
386 inline int rvu_get_pf(u16 pcifunc)
387 {
388 	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
389 }
390 
391 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
392 {
393 	u64 cfg;
394 
395 	/* Get numVFs attached to this PF and first HWVF */
396 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
397 	if (numvfs)
398 		*numvfs = (cfg >> 12) & 0xFF;
399 	if (hwvf)
400 		*hwvf = cfg & 0xFFF;
401 }
402 
403 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
404 {
405 	int pf, func;
406 	u64 cfg;
407 
408 	pf = rvu_get_pf(pcifunc);
409 	func = pcifunc & RVU_PFVF_FUNC_MASK;
410 
411 	/* Get first HWVF attached to this PF */
412 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
413 
414 	return ((cfg & 0xFFF) + func - 1);
415 }
416 
417 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
418 {
419 	/* Check if it is a PF or VF */
420 	if (pcifunc & RVU_PFVF_FUNC_MASK)
421 		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
422 	else
423 		return &rvu->pf[rvu_get_pf(pcifunc)];
424 }
425 
426 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
427 {
428 	int pf, vf, nvfs;
429 	u64 cfg;
430 
431 	pf = rvu_get_pf(pcifunc);
432 	if (pf >= rvu->hw->total_pfs)
433 		return false;
434 
435 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
436 		return true;
437 
438 	/* Check if VF is within number of VFs attached to this PF */
439 	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
440 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
441 	nvfs = (cfg >> 12) & 0xFF;
442 	if (vf >= nvfs)
443 		return false;
444 
445 	return true;
446 }
447 
448 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
449 {
450 	struct rvu_block *block;
451 
452 	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
453 		return false;
454 
455 	block = &hw->block[blkaddr];
456 	return block->implemented;
457 }
458 
459 static void rvu_check_block_implemented(struct rvu *rvu)
460 {
461 	struct rvu_hwinfo *hw = rvu->hw;
462 	struct rvu_block *block;
463 	int blkid;
464 	u64 cfg;
465 
466 	/* For each block check if 'implemented' bit is set */
467 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
468 		block = &hw->block[blkid];
469 		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
470 		if (cfg & BIT_ULL(11))
471 			block->implemented = true;
472 	}
473 }
474 
475 static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
476 {
477 	rvu_write64(rvu, BLKADDR_RVUM,
478 		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
479 		    RVU_BLK_RVUM_REVID);
480 }
481 
482 static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
483 {
484 	rvu_write64(rvu, BLKADDR_RVUM,
485 		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
486 }
487 
488 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
489 {
490 	int err;
491 
492 	if (!block->implemented)
493 		return 0;
494 
495 	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
496 	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
497 			   true);
498 	return err;
499 }
500 
501 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
502 {
503 	struct rvu_block *block = &rvu->hw->block[blkaddr];
504 	int err;
505 
506 	if (!block->implemented)
507 		return;
508 
509 	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
510 	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
511 	if (err)
512 		dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
513 }
514 
515 static void rvu_reset_all_blocks(struct rvu *rvu)
516 {
517 	/* Do a HW reset of all RVU blocks */
518 	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
519 	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
520 	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
521 	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
522 	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
523 	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
524 	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
525 	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
526 	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
527 	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
528 	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
529 	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
530 	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
531 }
532 
533 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
534 {
535 	struct rvu_pfvf *pfvf;
536 	u64 cfg;
537 	int lf;
538 
539 	for (lf = 0; lf < block->lf.max; lf++) {
540 		cfg = rvu_read64(rvu, block->addr,
541 				 block->lfcfg_reg | (lf << block->lfshift));
542 		if (!(cfg & BIT_ULL(63)))
543 			continue;
544 
545 		/* Set this resource as being used */
546 		__set_bit(lf, block->lf.bmap);
547 
548 		/* Get the PF/VF to which this LF is attached */
549 		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
550 		rvu_update_rsrc_map(rvu, pfvf, block,
551 				    (cfg >> 8) & 0xFFFF, lf, true);
552 
553 		/* Set start MSIX vector for this LF within this PF/VF */
554 		rvu_set_msix_offset(rvu, pfvf, block, lf);
555 	}
556 }
557 
558 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
559 {
560 	int min_vecs;
561 
562 	if (!vf)
563 		goto check_pf;
564 
565 	if (!nvecs) {
566 		dev_warn(rvu->dev,
567 			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
568 			 pf, vf - 1, nvecs);
569 	}
570 	return;
571 
572 check_pf:
573 	if (pf == 0)
574 		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
575 	else
576 		min_vecs = RVU_PF_INT_VEC_CNT;
577 
578 	if (!(nvecs < min_vecs))
579 		return;
580 	dev_warn(rvu->dev,
581 		 "PF%d is configured with too few vectors, %d, min is %d\n",
582 		 pf, nvecs, min_vecs);
583 }
584 
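/* Read the MSIX vector counts firmware has provisioned for each enabled PF
 * and VF, build per-device MSIX allocation bitmaps, reserve the vectors
 * used for PF/VF interrupts and remap the MSIX table base as an IOVA,
 * since HW interprets RVU_AF_MSIXTR_BASE as an IOVA.
 */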
585 static int rvu_setup_msix_resources(struct rvu *rvu)
586 {
587 	struct rvu_hwinfo *hw = rvu->hw;
588 	int pf, vf, numvfs, hwvf, err;
589 	int nvecs, offset, max_msix;
590 	struct rvu_pfvf *pfvf;
591 	u64 cfg, phy_addr;
592 	dma_addr_t iova;
593 
594 	for (pf = 0; pf < hw->total_pfs; pf++) {
595 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
596 		/* If PF is not enabled, nothing to do */
597 		if (!((cfg >> 20) & 0x01))
598 			continue;
599 
600 		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
601 
602 		pfvf = &rvu->pf[pf];
603 		/* Get num of MSIX vectors attached to this PF */
604 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
605 		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
606 		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
607 
608 		/* Alloc msix bitmap for this PF */
609 		err = rvu_alloc_bitmap(&pfvf->msix);
610 		if (err)
611 			return err;
612 
613 		/* Allocate memory for MSIX vector to RVU block LF mapping */
614 		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
615 						sizeof(u16), GFP_KERNEL);
616 		if (!pfvf->msix_lfmap)
617 			return -ENOMEM;
618 
619 		/* For PF0 (AF) firmware will set msix vector offsets for
620 		 * AF, block AF and PF0_INT vectors, so jump to VFs.
621 		 */
622 		if (!pf)
623 			goto setup_vfmsix;
624 
625 		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
626 		 * These are allocated on driver init and never freed,
627 		 * so no need to set 'msix_lfmap' for these.
628 		 */
629 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
630 		nvecs = (cfg >> 12) & 0xFF;
631 		cfg &= ~0x7FFULL;
632 		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
633 		rvu_write64(rvu, BLKADDR_RVUM,
634 			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
635 setup_vfmsix:
636 		/* Alloc msix bitmap for VFs */
637 		for (vf = 0; vf < numvfs; vf++) {
638 			pfvf =  &rvu->hwvf[hwvf + vf];
639 			/* Get num of MSIX vectors attached to this VF */
640 			cfg = rvu_read64(rvu, BLKADDR_RVUM,
641 					 RVU_PRIV_PFX_MSIX_CFG(pf));
642 			pfvf->msix.max = (cfg & 0xFFF) + 1;
643 			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
644 
645 			/* Alloc msix bitmap for this VF */
646 			err = rvu_alloc_bitmap(&pfvf->msix);
647 			if (err)
648 				return err;
649 
650 			pfvf->msix_lfmap =
651 				devm_kcalloc(rvu->dev, pfvf->msix.max,
652 					     sizeof(u16), GFP_KERNEL);
653 			if (!pfvf->msix_lfmap)
654 				return -ENOMEM;
655 
656 			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
657 			 * These are allocated on driver init and never freed,
658 			 * so no need to set 'msix_lfmap' for these.
659 			 */
660 			cfg = rvu_read64(rvu, BLKADDR_RVUM,
661 					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
662 			nvecs = (cfg >> 12) & 0xFF;
663 			cfg &= ~0x7FFULL;
664 			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
665 			rvu_write64(rvu, BLKADDR_RVUM,
666 				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
667 				    cfg | offset);
668 		}
669 	}
670 
671 	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
672 	 * create an IOMMU mapping for the physical address configured by
673 	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
674 	 */
675 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
676 	max_msix = cfg & 0xFFFFF;
677 	if (rvu->fwdata && rvu->fwdata->msixtr_base)
678 		phy_addr = rvu->fwdata->msixtr_base;
679 	else
680 		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
681 
682 	iova = dma_map_resource(rvu->dev, phy_addr,
683 				max_msix * PCI_MSIX_ENTRY_SIZE,
684 				DMA_BIDIRECTIONAL, 0);
685 
686 	if (dma_mapping_error(rvu->dev, iova))
687 		return -ENOMEM;
688 
689 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
690 	rvu->msix_base_iova = iova;
691 	rvu->msixtr_base_phy = phy_addr;
692 
693 	return 0;
694 }
695 
696 static void rvu_reset_msix(struct rvu *rvu)
697 {
698 	/* Restore msixtr base register */
699 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
700 		    rvu->msixtr_base_phy);
701 }
702 
703 static void rvu_free_hw_resources(struct rvu *rvu)
704 {
705 	struct rvu_hwinfo *hw = rvu->hw;
706 	struct rvu_block *block;
707 	struct rvu_pfvf  *pfvf;
708 	int id, max_msix;
709 	u64 cfg;
710 
711 	rvu_npa_freemem(rvu);
712 	rvu_npc_freemem(rvu);
713 	rvu_nix_freemem(rvu);
714 
715 	/* Free block LF bitmaps */
716 	for (id = 0; id < BLK_COUNT; id++) {
717 		block = &hw->block[id];
718 		kfree(block->lf.bmap);
719 	}
720 
721 	/* Free MSIX bitmaps */
722 	for (id = 0; id < hw->total_pfs; id++) {
723 		pfvf = &rvu->pf[id];
724 		kfree(pfvf->msix.bmap);
725 	}
726 
727 	for (id = 0; id < hw->total_vfs; id++) {
728 		pfvf = &rvu->hwvf[id];
729 		kfree(pfvf->msix.bmap);
730 	}
731 
732 	/* Unmap MSIX vector base IOVA mapping */
733 	if (!rvu->msix_base_iova)
734 		return;
735 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
736 	max_msix = cfg & 0xFFFFF;
737 	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
738 			   max_msix * PCI_MSIX_ENTRY_SIZE,
739 			   DMA_BIDIRECTIONAL, 0);
740 
741 	rvu_reset_msix(rvu);
742 	mutex_destroy(&rvu->rsrc_lock);
743 }
744 
745 static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
746 {
747 	struct rvu_hwinfo *hw = rvu->hw;
748 	int pf, vf, numvfs, hwvf;
749 	struct rvu_pfvf *pfvf;
750 	u64 *mac;
751 
752 	for (pf = 0; pf < hw->total_pfs; pf++) {
753 		/* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
754 		if (!pf)
755 			goto lbkvf;
756 
757 		if (!is_pf_cgxmapped(rvu, pf))
758 			continue;
759 		/* Assign MAC address to PF */
760 		pfvf = &rvu->pf[pf];
761 		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
762 			mac = &rvu->fwdata->pf_macs[pf];
763 			if (*mac)
764 				u64_to_ether_addr(*mac, pfvf->mac_addr);
765 			else
766 				eth_random_addr(pfvf->mac_addr);
767 		} else {
768 			eth_random_addr(pfvf->mac_addr);
769 		}
770 		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
771 
772 lbkvf:
773 		/* Assign MAC addresses to VFs */
774 		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
775 		for (vf = 0; vf < numvfs; vf++, hwvf++) {
776 			pfvf = &rvu->hwvf[hwvf];
777 			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
778 				mac = &rvu->fwdata->vf_macs[hwvf];
779 				if (*mac)
780 					u64_to_ether_addr(*mac, pfvf->mac_addr);
781 				else
782 					eth_random_addr(pfvf->mac_addr);
783 			} else {
784 				eth_random_addr(pfvf->mac_addr);
785 			}
786 			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
787 		}
788 	}
789 }
790 
791 static int rvu_fwdata_init(struct rvu *rvu)
792 {
793 	u64 fwdbase;
794 	int err;
795 
796 	/* Get firmware data base address */
797 	err = cgx_get_fwdata_base(&fwdbase);
798 	if (err)
799 		goto fail;
800 	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
801 	if (!rvu->fwdata)
802 		goto fail;
803 	if (!is_rvu_fwdata_valid(rvu)) {
804 		dev_err(rvu->dev,
805 			"Mismatch in 'fwdata' struct between kernel and firmware\n");
806 		iounmap(rvu->fwdata);
807 		rvu->fwdata = NULL;
808 		return -EINVAL;
809 	}
810 	return 0;
811 fail:
812 	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
813 	return -EIO;
814 }
815 
816 static void rvu_fwdata_exit(struct rvu *rvu)
817 {
818 	if (rvu->fwdata)
819 		iounmap(rvu->fwdata);
820 }
821 
822 static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
823 {
824 	struct rvu_hwinfo *hw = rvu->hw;
825 	struct rvu_block *block;
826 	int blkid;
827 	u64 cfg;
828 
829 	/* Init NIX LF's bitmap */
830 	block = &hw->block[blkaddr];
831 	if (!block->implemented)
832 		return 0;
833 	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
834 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
835 	block->lf.max = cfg & 0xFFF;
836 	block->addr = blkaddr;
837 	block->type = BLKTYPE_NIX;
838 	block->lfshift = 8;
839 	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
840 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
841 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
842 	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
843 	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
844 	block->lfreset_reg = NIX_AF_LF_RST;
845 	sprintf(block->name, "NIX%d", blkid);
846 	rvu->nix_blkaddr[blkid] = blkaddr;
847 	return rvu_alloc_bitmap(&block->lf);
848 }
849 
850 static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
851 {
852 	struct rvu_hwinfo *hw = rvu->hw;
853 	struct rvu_block *block;
854 	int blkid;
855 	u64 cfg;
856 
857 	/* Init CPT LF's bitmap */
858 	block = &hw->block[blkaddr];
859 	if (!block->implemented)
860 		return 0;
861 	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
862 	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
863 	block->lf.max = cfg & 0xFF;
864 	block->addr = blkaddr;
865 	block->type = BLKTYPE_CPT;
866 	block->multislot = true;
867 	block->lfshift = 3;
868 	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
869 	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
870 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
871 	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
872 	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
873 	block->lfreset_reg = CPT_AF_LF_RST;
874 	sprintf(block->name, "CPT%d", blkid);
875 	return rvu_alloc_bitmap(&block->lf);
876 }
877 
878 static void rvu_get_lbk_bufsize(struct rvu *rvu)
879 {
880 	struct pci_dev *pdev = NULL;
881 	void __iomem *base;
882 	u64 lbk_const;
883 
884 	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
885 			      PCI_DEVID_OCTEONTX2_LBK, pdev);
886 	if (!pdev)
887 		return;
888 
889 	base = pci_ioremap_bar(pdev, 0);
890 	if (!base)
891 		goto err_put;
892 
893 	lbk_const = readq(base + LBK_CONST);
894 
895 	/* Cache the loopback (LBK) FIFO size */
896 	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
897 
898 	iounmap(base);
899 err_put:
900 	pci_dev_put(pdev);
901 }
902 
903 static int rvu_setup_hw_resources(struct rvu *rvu)
904 {
905 	struct rvu_hwinfo *hw = rvu->hw;
906 	struct rvu_block *block;
907 	int blkid, err;
908 	u64 cfg;
909 
910 	/* Get HW supported max RVU PF & VF count */
911 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
912 	hw->total_pfs = (cfg >> 32) & 0xFF;
913 	hw->total_vfs = (cfg >> 20) & 0xFFF;
914 	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
915 
916 	/* Init NPA LF's bitmap */
917 	block = &hw->block[BLKADDR_NPA];
918 	if (!block->implemented)
919 		goto nix;
920 	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
921 	block->lf.max = (cfg >> 16) & 0xFFF;
922 	block->addr = BLKADDR_NPA;
923 	block->type = BLKTYPE_NPA;
924 	block->lfshift = 8;
925 	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
926 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
927 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
928 	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
929 	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
930 	block->lfreset_reg = NPA_AF_LF_RST;
931 	sprintf(block->name, "NPA");
932 	err = rvu_alloc_bitmap(&block->lf);
933 	if (err) {
934 		dev_err(rvu->dev,
935 			"%s: Failed to allocate NPA LF bitmap\n", __func__);
936 		return err;
937 	}
938 
939 nix:
940 	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
941 	if (err) {
942 		dev_err(rvu->dev,
943 			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
944 		return err;
945 	}
946 
947 	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
948 	if (err) {
949 		dev_err(rvu->dev,
950 			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
951 		return err;
952 	}
953 
954 	/* Init SSO group's bitmap */
955 	block = &hw->block[BLKADDR_SSO];
956 	if (!block->implemented)
957 		goto ssow;
958 	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
959 	block->lf.max = cfg & 0xFFFF;
960 	block->addr = BLKADDR_SSO;
961 	block->type = BLKTYPE_SSO;
962 	block->multislot = true;
963 	block->lfshift = 3;
964 	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
965 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
966 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
967 	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
968 	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
969 	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
970 	sprintf(block->name, "SSO GROUP");
971 	err = rvu_alloc_bitmap(&block->lf);
972 	if (err) {
973 		dev_err(rvu->dev,
974 			"%s: Failed to allocate SSO LF bitmap\n", __func__);
975 		return err;
976 	}
977 
978 ssow:
979 	/* Init SSO workslot's bitmap */
980 	block = &hw->block[BLKADDR_SSOW];
981 	if (!block->implemented)
982 		goto tim;
983 	block->lf.max = (cfg >> 56) & 0xFF;
984 	block->addr = BLKADDR_SSOW;
985 	block->type = BLKTYPE_SSOW;
986 	block->multislot = true;
987 	block->lfshift = 3;
988 	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
989 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
990 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
991 	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
992 	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
993 	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
994 	sprintf(block->name, "SSOWS");
995 	err = rvu_alloc_bitmap(&block->lf);
996 	if (err) {
997 		dev_err(rvu->dev,
998 			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
999 		return err;
1000 	}
1001 
1002 tim:
1003 	/* Init TIM LF's bitmap */
1004 	block = &hw->block[BLKADDR_TIM];
1005 	if (!block->implemented)
1006 		goto cpt;
1007 	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
1008 	block->lf.max = cfg & 0xFFFF;
1009 	block->addr = BLKADDR_TIM;
1010 	block->type = BLKTYPE_TIM;
1011 	block->multislot = true;
1012 	block->lfshift = 3;
1013 	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
1014 	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
1015 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
1016 	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
1017 	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
1018 	block->lfreset_reg = TIM_AF_LF_RST;
1019 	sprintf(block->name, "TIM");
1020 	err = rvu_alloc_bitmap(&block->lf);
1021 	if (err) {
1022 		dev_err(rvu->dev,
1023 			"%s: Failed to allocate TIM LF bitmap\n", __func__);
1024 		return err;
1025 	}
1026 
1027 cpt:
1028 	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
1029 	if (err) {
1030 		dev_err(rvu->dev,
1031 			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
1032 		return err;
1033 	}
1034 	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
1035 	if (err) {
1036 		dev_err(rvu->dev,
1037 			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
1038 		return err;
1039 	}
1040 
1041 	/* Allocate memory for PFVF data */
1042 	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
1043 			       sizeof(struct rvu_pfvf), GFP_KERNEL);
1044 	if (!rvu->pf) {
1045 		dev_err(rvu->dev,
1046 			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
1047 		return -ENOMEM;
1048 	}
1049 
1050 	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
1051 				 sizeof(struct rvu_pfvf), GFP_KERNEL);
1052 	if (!rvu->hwvf) {
1053 		dev_err(rvu->dev,
1054 			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
1055 		return -ENOMEM;
1056 	}
1057 
1058 	mutex_init(&rvu->rsrc_lock);
1059 
1060 	rvu_fwdata_init(rvu);
1061 
1062 	err = rvu_setup_msix_resources(rvu);
1063 	if (err) {
1064 		dev_err(rvu->dev,
1065 			"%s: Failed to setup MSIX resources\n", __func__);
1066 		return err;
1067 	}
1068 
1069 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1070 		block = &hw->block[blkid];
1071 		if (!block->lf.bmap)
1072 			continue;
1073 
1074 		/* Allocate memory for block LF/slot to pcifunc mapping info */
1075 		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
1076 					     sizeof(u16), GFP_KERNEL);
1077 		if (!block->fn_map) {
1078 			err = -ENOMEM;
1079 			goto msix_err;
1080 		}
1081 
1082 		/* Scan the block to check if low-level firmware has
1083 		 * already provisioned any of its LFs to a PF/VF.
1084 		 */
1085 		rvu_scan_block(rvu, block);
1086 	}
1087 
1088 	err = rvu_set_channels_base(rvu);
1089 	if (err)
1090 		goto msix_err;
1091 
1092 	err = rvu_npc_init(rvu);
1093 	if (err) {
1094 		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
1095 		goto npc_err;
1096 	}
1097 
1098 	err = rvu_cgx_init(rvu);
1099 	if (err) {
1100 		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
1101 		goto cgx_err;
1102 	}
1103 
1104 	/* Assign MACs for CGX mapped functions */
1105 	rvu_setup_pfvf_macaddress(rvu);
1106 
1107 	err = rvu_npa_init(rvu);
1108 	if (err) {
1109 		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
1110 		goto npa_err;
1111 	}
1112 
1113 	rvu_get_lbk_bufsize(rvu);
1114 
1115 	err = rvu_nix_init(rvu);
1116 	if (err) {
1117 		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
1118 		goto nix_err;
1119 	}
1120 
1121 	err = rvu_sdp_init(rvu);
1122 	if (err) {
1123 		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
1124 		goto nix_err;
1125 	}
1126 
1127 	rvu_program_channels(rvu);
1128 
1129 	return 0;
1130 
1131 nix_err:
1132 	rvu_nix_freemem(rvu);
1133 npa_err:
1134 	rvu_npa_freemem(rvu);
1135 cgx_err:
1136 	rvu_cgx_exit(rvu);
1137 npc_err:
1138 	rvu_npc_freemem(rvu);
1139 	rvu_fwdata_exit(rvu);
1140 msix_err:
1141 	rvu_reset_msix(rvu);
1142 	return err;
1143 }
1144 
1145 /* NPA and NIX admin queue APIs */
1146 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1147 {
1148 	if (!aq)
1149 		return;
1150 
1151 	qmem_free(rvu->dev, aq->inst);
1152 	qmem_free(rvu->dev, aq->res);
1153 	devm_kfree(rvu->dev, aq);
1154 }
1155 
1156 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1157 		 int qsize, int inst_size, int res_size)
1158 {
1159 	struct admin_queue *aq;
1160 	int err;
1161 
1162 	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1163 	if (!*ad_queue)
1164 		return -ENOMEM;
1165 	aq = *ad_queue;
1166 
1167 	/* Alloc memory for instructions i.e AQ */
1168 	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1169 	if (err) {
1170 		devm_kfree(rvu->dev, aq);
1171 		return err;
1172 	}
1173 
1174 	/* Alloc memory for results */
1175 	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1176 	if (err) {
1177 		rvu_aq_free(rvu, aq);
1178 		return err;
1179 	}
1180 
1181 	spin_lock_init(&aq->lock);
1182 	return 0;
1183 }
1184 
1185 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1186 			   struct ready_msg_rsp *rsp)
1187 {
1188 	if (rvu->fwdata) {
1189 		rsp->rclk_freq = rvu->fwdata->rclk;
1190 		rsp->sclk_freq = rvu->fwdata->sclk;
1191 	}
1192 	return 0;
1193 }
1194 
1195 /* Get the current count of an RVU block's LFs/slots
1196  * provisioned to a given RVU func.
1197  */
1198 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1199 {
1200 	switch (blkaddr) {
1201 	case BLKADDR_NPA:
1202 		return pfvf->npalf ? 1 : 0;
1203 	case BLKADDR_NIX0:
1204 	case BLKADDR_NIX1:
1205 		return pfvf->nixlf ? 1 : 0;
1206 	case BLKADDR_SSO:
1207 		return pfvf->sso;
1208 	case BLKADDR_SSOW:
1209 		return pfvf->ssow;
1210 	case BLKADDR_TIM:
1211 		return pfvf->timlfs;
1212 	case BLKADDR_CPT0:
1213 		return pfvf->cptlfs;
1214 	case BLKADDR_CPT1:
1215 		return pfvf->cpt1_lfs;
1216 	}
1217 	return 0;
1218 }
1219 
1220 /* Return true if LFs of block type are attached to pcifunc */
1221 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1222 {
1223 	switch (blktype) {
1224 	case BLKTYPE_NPA:
1225 		return pfvf->npalf ? 1 : 0;
1226 	case BLKTYPE_NIX:
1227 		return pfvf->nixlf ? 1 : 0;
1228 	case BLKTYPE_SSO:
1229 		return !!pfvf->sso;
1230 	case BLKTYPE_SSOW:
1231 		return !!pfvf->ssow;
1232 	case BLKTYPE_TIM:
1233 		return !!pfvf->timlfs;
1234 	case BLKTYPE_CPT:
1235 		return pfvf->cptlfs || pfvf->cpt1_lfs;
1236 	}
1237 
1238 	return false;
1239 }
1240 
1241 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1242 {
1243 	struct rvu_pfvf *pfvf;
1244 
1245 	if (!is_pf_func_valid(rvu, pcifunc))
1246 		return false;
1247 
1248 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1249 
1250 	/* Check if this PFFUNC has a LF of type blktype attached */
1251 	if (!is_blktype_attached(pfvf, blktype))
1252 		return false;
1253 
1254 	return true;
1255 }
1256 
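/* Look up the HW LF index backing a given (pcifunc, slot) pair via the
 * block's LF lookup register: write the query with the trigger bit set,
 * wait for HW to clear it, then read back the LF number if the result is
 * marked valid.
 */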
1257 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1258 			   int pcifunc, int slot)
1259 {
1260 	u64 val;
1261 
1262 	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1263 	rvu_write64(rvu, block->addr, block->lookup_reg, val);
1264 	/* Wait for the lookup to finish */
1265 	/* TODO: put some timeout here */
1266 	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1267 		;
1268 
1269 	val = rvu_read64(rvu, block->addr, block->lookup_reg);
1270 
1271 	/* Check LF valid bit */
1272 	if (!(val & (1ULL << 12)))
1273 		return -1;
1274 
1275 	return (val & 0xFFF);
1276 }
1277 
1278 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1279 {
1280 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1281 	struct rvu_hwinfo *hw = rvu->hw;
1282 	struct rvu_block *block;
1283 	int slot, lf, num_lfs;
1284 	int blkaddr;
1285 
1286 	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1287 	if (blkaddr < 0)
1288 		return;
1289 
1290 	if (blktype == BLKTYPE_NIX)
1291 		rvu_nix_reset_mac(pfvf, pcifunc);
1292 
1293 	block = &hw->block[blkaddr];
1294 
1295 	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1296 	if (!num_lfs)
1297 		return;
1298 
1299 	for (slot = 0; slot < num_lfs; slot++) {
1300 		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1301 		if (lf < 0) /* This should never happen */
1302 			continue;
1303 
1304 		/* Disable the LF */
1305 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1306 			    (lf << block->lfshift), 0x00ULL);
1307 
1308 		/* Update SW maintained mapping info as well */
1309 		rvu_update_rsrc_map(rvu, pfvf, block,
1310 				    pcifunc, lf, false);
1311 
1312 		/* Free the resource */
1313 		rvu_free_rsrc(&block->lf, lf);
1314 
1315 		/* Clear MSIX vector offset for this LF */
1316 		rvu_clear_msix_offset(rvu, pfvf, block, lf);
1317 	}
1318 }
1319 
1320 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1321 			    u16 pcifunc)
1322 {
1323 	struct rvu_hwinfo *hw = rvu->hw;
1324 	bool detach_all = true;
1325 	struct rvu_block *block;
1326 	int blkid;
1327 
1328 	mutex_lock(&rvu->rsrc_lock);
1329 
1330 	/* Check for partial resource detach */
1331 	if (detach && detach->partial)
1332 		detach_all = false;
1333 
1334 	/* Check each RVU block for LFs attached to this func and,
1335 	 * if any are found, detach them.
1336 	 */
1337 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1338 		block = &hw->block[blkid];
1339 		if (!block->lf.bmap)
1340 			continue;
1341 		if (!detach_all && detach) {
1342 			if (blkid == BLKADDR_NPA && !detach->npalf)
1343 				continue;
1344 			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1345 				continue;
1346 			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1347 				continue;
1348 			else if ((blkid == BLKADDR_SSO) && !detach->sso)
1349 				continue;
1350 			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1351 				continue;
1352 			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1353 				continue;
1354 			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1355 				continue;
1356 			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1357 				continue;
1358 		}
1359 		rvu_detach_block(rvu, pcifunc, block->type);
1360 	}
1361 
1362 	mutex_unlock(&rvu->rsrc_lock);
1363 	return 0;
1364 }
1365 
1366 int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1367 				      struct rsrc_detach *detach,
1368 				      struct msg_rsp *rsp)
1369 {
1370 	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1371 }
1372 
1373 int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1374 {
1375 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1376 	int blkaddr = BLKADDR_NIX0, vf;
1377 	struct rvu_pfvf *pf;
1378 
1379 	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1380 
1381 	/* All CGX mapped PFs are set with assigned NIX block during init */
1382 	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1383 		blkaddr = pf->nix_blkaddr;
1384 	} else if (is_afvf(pcifunc)) {
1385 		vf = pcifunc - 1;
1386 		/* Assign NIX based on VF number: all even numbered VFs get
1387 		 * NIX0 and odd numbered VFs get NIX1.
1388 		 */
1389 		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1390 		/* NIX1 is not present on all silicons */
1391 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1392 			blkaddr = BLKADDR_NIX0;
1393 	}
1394 
1395 	/* if SDP1 then the blkaddr is NIX1 */
1396 	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
1397 		blkaddr = BLKADDR_NIX1;
1398 
1399 	switch (blkaddr) {
1400 	case BLKADDR_NIX1:
1401 		pfvf->nix_blkaddr = BLKADDR_NIX1;
1402 		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1403 		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1404 		break;
1405 	case BLKADDR_NIX0:
1406 	default:
1407 		pfvf->nix_blkaddr = BLKADDR_NIX0;
1408 		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1409 		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1410 		break;
1411 	}
1412 
1413 	return pfvf->nix_blkaddr;
1414 }
1415 
1416 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1417 				  u16 pcifunc, struct rsrc_attach *attach)
1418 {
1419 	int blkaddr;
1420 
1421 	switch (blktype) {
1422 	case BLKTYPE_NIX:
1423 		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1424 		break;
1425 	case BLKTYPE_CPT:
1426 		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1427 			return rvu_get_blkaddr(rvu, blktype, 0);
1428 		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1429 			  BLKADDR_CPT0;
1430 		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1431 			return -ENODEV;
1432 		break;
1433 	default:
1434 		return rvu_get_blkaddr(rvu, blktype, 0);
1435 	}
1436 
1437 	if (is_block_implemented(rvu->hw, blkaddr))
1438 		return blkaddr;
1439 
1440 	return -ENODEV;
1441 }
1442 
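/* Attach 'num_lfs' LFs of the given block type to 'pcifunc': allocate a
 * free LF per slot, bind it to the pcifunc via the LF config register,
 * update the SW resource map and reserve the LF's MSIX vectors.
 */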
1443 static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1444 			     int num_lfs, struct rsrc_attach *attach)
1445 {
1446 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1447 	struct rvu_hwinfo *hw = rvu->hw;
1448 	struct rvu_block *block;
1449 	int slot, lf;
1450 	int blkaddr;
1451 	u64 cfg;
1452 
1453 	if (!num_lfs)
1454 		return;
1455 
1456 	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1457 	if (blkaddr < 0)
1458 		return;
1459 
1460 	block = &hw->block[blkaddr];
1461 	if (!block->lf.bmap)
1462 		return;
1463 
1464 	for (slot = 0; slot < num_lfs; slot++) {
1465 		/* Allocate the resource */
1466 		lf = rvu_alloc_rsrc(&block->lf);
1467 		if (lf < 0)
1468 			return;
1469 
1470 		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1471 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1472 			    (lf << block->lfshift), cfg);
1473 		rvu_update_rsrc_map(rvu, pfvf, block,
1474 				    pcifunc, lf, true);
1475 
1476 		/* Set start MSIX vector for this LF within this PF/VF */
1477 		rvu_set_msix_offset(rvu, pfvf, block, lf);
1478 	}
1479 }
1480 
1481 static int rvu_check_rsrc_availability(struct rvu *rvu,
1482 				       struct rsrc_attach *req, u16 pcifunc)
1483 {
1484 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1485 	int free_lfs, mappedlfs, blkaddr;
1486 	struct rvu_hwinfo *hw = rvu->hw;
1487 	struct rvu_block *block;
1488 
1489 	/* Only one NPA LF can be attached */
1490 	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1491 		block = &hw->block[BLKADDR_NPA];
1492 		free_lfs = rvu_rsrc_free_count(&block->lf);
1493 		if (!free_lfs)
1494 			goto fail;
1495 	} else if (req->npalf) {
1496 		dev_err(&rvu->pdev->dev,
1497 			"Func 0x%x: Invalid req, already has NPA\n",
1498 			 pcifunc);
1499 		return -EINVAL;
1500 	}
1501 
1502 	/* Only one NIX LF can be attached */
1503 	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1504 		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1505 						 pcifunc, req);
1506 		if (blkaddr < 0)
1507 			return blkaddr;
1508 		block = &hw->block[blkaddr];
1509 		free_lfs = rvu_rsrc_free_count(&block->lf);
1510 		if (!free_lfs)
1511 			goto fail;
1512 	} else if (req->nixlf) {
1513 		dev_err(&rvu->pdev->dev,
1514 			"Func 0x%x: Invalid req, already has NIX\n",
1515 			pcifunc);
1516 		return -EINVAL;
1517 	}
1518 
1519 	if (req->sso) {
1520 		block = &hw->block[BLKADDR_SSO];
1521 		/* Is the request within limits? */
1522 		if (req->sso > block->lf.max) {
1523 			dev_err(&rvu->pdev->dev,
1524 				"Func 0x%x: Invalid SSO req, %d > max %d\n",
1525 				 pcifunc, req->sso, block->lf.max);
1526 			return -EINVAL;
1527 		}
1528 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1529 		free_lfs = rvu_rsrc_free_count(&block->lf);
1530 		/* Check if additional resources are available */
1531 		if (req->sso > mappedlfs &&
1532 		    ((req->sso - mappedlfs) > free_lfs))
1533 			goto fail;
1534 	}
1535 
1536 	if (req->ssow) {
1537 		block = &hw->block[BLKADDR_SSOW];
1538 		if (req->ssow > block->lf.max) {
1539 			dev_err(&rvu->pdev->dev,
1540 				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
1541 				 pcifunc, req->ssow, block->lf.max);
1542 			return -EINVAL;
1543 		}
1544 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1545 		free_lfs = rvu_rsrc_free_count(&block->lf);
1546 		if (req->ssow > mappedlfs &&
1547 		    ((req->ssow - mappedlfs) > free_lfs))
1548 			goto fail;
1549 	}
1550 
1551 	if (req->timlfs) {
1552 		block = &hw->block[BLKADDR_TIM];
1553 		if (req->timlfs > block->lf.max) {
1554 			dev_err(&rvu->pdev->dev,
1555 				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1556 				 pcifunc, req->timlfs, block->lf.max);
1557 			return -EINVAL;
1558 		}
1559 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1560 		free_lfs = rvu_rsrc_free_count(&block->lf);
1561 		if (req->timlfs > mappedlfs &&
1562 		    ((req->timlfs - mappedlfs) > free_lfs))
1563 			goto fail;
1564 	}
1565 
1566 	if (req->cptlfs) {
1567 		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1568 						 pcifunc, req);
1569 		if (blkaddr < 0)
1570 			return blkaddr;
1571 		block = &hw->block[blkaddr];
1572 		if (req->cptlfs > block->lf.max) {
1573 			dev_err(&rvu->pdev->dev,
1574 				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1575 				 pcifunc, req->cptlfs, block->lf.max);
1576 			return -EINVAL;
1577 		}
1578 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1579 		free_lfs = rvu_rsrc_free_count(&block->lf);
1580 		if (req->cptlfs > mappedlfs &&
1581 		    ((req->cptlfs - mappedlfs) > free_lfs))
1582 			goto fail;
1583 	}
1584 
1585 	return 0;
1586 
1587 fail:
1588 	dev_info(rvu->dev, "Request for %s failed\n", block->name);
1589 	return -ENOSPC;
1590 }
1591 
1592 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1593 				       struct rsrc_attach *attach)
1594 {
1595 	int blkaddr, num_lfs;
1596 
1597 	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1598 					 attach->hdr.pcifunc, attach);
1599 	if (blkaddr < 0)
1600 		return false;
1601 
1602 	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1603 					blkaddr);
1604 	/* Does the requester already have LFs from the given block? */
1605 	return !!num_lfs;
1606 }
1607 
1608 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1609 				      struct rsrc_attach *attach,
1610 				      struct msg_rsp *rsp)
1611 {
1612 	u16 pcifunc = attach->hdr.pcifunc;
1613 	int err;
1614 
1615 	/* If first request, detach all existing attached resources */
1616 	if (!attach->modify)
1617 		rvu_detach_rsrcs(rvu, NULL, pcifunc);
1618 
1619 	mutex_lock(&rvu->rsrc_lock);
1620 
1621 	/* Check if the request can be accommodated */
1622 	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1623 	if (err)
1624 		goto exit;
1625 
1626 	/* Now attach the requested resources */
1627 	if (attach->npalf)
1628 		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1629 
1630 	if (attach->nixlf)
1631 		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1632 
1633 	if (attach->sso) {
1634 		/* An RVU func doesn't know which exact LF or slot is attached
1635 		 * to it; it always sees them as slots 0, 1, 2... So for a
1636 		 * 'modify' request, simply detach all currently attached
1637 		 * LFs/slots and attach afresh.
1638 		 */
1639 		if (attach->modify)
1640 			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1641 		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1642 				 attach->sso, attach);
1643 	}
1644 
1645 	if (attach->ssow) {
1646 		if (attach->modify)
1647 			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1648 		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1649 				 attach->ssow, attach);
1650 	}
1651 
1652 	if (attach->timlfs) {
1653 		if (attach->modify)
1654 			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1655 		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1656 				 attach->timlfs, attach);
1657 	}
1658 
1659 	if (attach->cptlfs) {
1660 		if (attach->modify &&
1661 		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1662 			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1663 		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1664 				 attach->cptlfs, attach);
1665 	}
1666 
1667 exit:
1668 	mutex_unlock(&rvu->rsrc_lock);
1669 	return err;
1670 }
1671 
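/* Find the first MSIX vector of this PF/VF that is mapped to the given
 * block LF, i.e. the start of the contiguous vector range reserved for it
 * by rvu_set_msix_offset().
 */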
1672 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1673 			       int blkaddr, int lf)
1674 {
1675 	u16 vec;
1676 
1677 	if (lf < 0)
1678 		return MSIX_VECTOR_INVALID;
1679 
1680 	for (vec = 0; vec < pfvf->msix.max; vec++) {
1681 		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1682 			return vec;
1683 	}
1684 	return MSIX_VECTOR_INVALID;
1685 }
1686 
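/* Reserve a contiguous range of this PF/VF's MSIX vectors for the given
 * LF, program the range's start offset into the LF's MSIX config register
 * and record the mapping in 'msix_lfmap' so it can be looked up and freed
 * later.
 */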
1687 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1688 				struct rvu_block *block, int lf)
1689 {
1690 	u16 nvecs, vec, offset;
1691 	u64 cfg;
1692 
1693 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1694 			 (lf << block->lfshift));
1695 	nvecs = (cfg >> 12) & 0xFF;
1696 
1697 	/* Check and alloc MSIX vectors, must be contiguous */
1698 	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1699 		return;
1700 
1701 	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1702 
1703 	/* Config MSIX offset in LF */
1704 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1705 		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1706 
1707 	/* Update the bitmap as well */
1708 	for (vec = 0; vec < nvecs; vec++)
1709 		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1710 }
1711 
1712 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1713 				  struct rvu_block *block, int lf)
1714 {
1715 	u16 nvecs, vec, offset;
1716 	u64 cfg;
1717 
1718 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1719 			 (lf << block->lfshift));
1720 	nvecs = (cfg >> 12) & 0xFF;
1721 
1722 	/* Clear MSIX offset in LF */
1723 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1724 		    (lf << block->lfshift), cfg & ~0x7FFULL);
1725 
1726 	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1727 
1728 	/* Update the mapping */
1729 	for (vec = 0; vec < nvecs; vec++)
1730 		pfvf->msix_lfmap[offset + vec] = 0;
1731 
1732 	/* Free the same in MSIX bitmap */
1733 	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1734 }
1735 
1736 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1737 				 struct msix_offset_rsp *rsp)
1738 {
1739 	struct rvu_hwinfo *hw = rvu->hw;
1740 	u16 pcifunc = req->hdr.pcifunc;
1741 	struct rvu_pfvf *pfvf;
1742 	int lf, slot, blkaddr;
1743 
1744 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1745 	if (!pfvf->msix.bmap)
1746 		return 0;
1747 
1748 	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
1749 	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1750 	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1751 
1752 	/* Get BLKADDR from which LFs are attached to pcifunc */
1753 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1754 	if (blkaddr < 0) {
1755 		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1756 	} else {
1757 		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1758 		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1759 	}
1760 
1761 	rsp->sso = pfvf->sso;
1762 	for (slot = 0; slot < rsp->sso; slot++) {
1763 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1764 		rsp->sso_msixoff[slot] =
1765 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1766 	}
1767 
1768 	rsp->ssow = pfvf->ssow;
1769 	for (slot = 0; slot < rsp->ssow; slot++) {
1770 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1771 		rsp->ssow_msixoff[slot] =
1772 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1773 	}
1774 
1775 	rsp->timlfs = pfvf->timlfs;
1776 	for (slot = 0; slot < rsp->timlfs; slot++) {
1777 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1778 		rsp->timlf_msixoff[slot] =
1779 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1780 	}
1781 
1782 	rsp->cptlfs = pfvf->cptlfs;
1783 	for (slot = 0; slot < rsp->cptlfs; slot++) {
1784 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1785 		rsp->cptlf_msixoff[slot] =
1786 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1787 	}
1788 
1789 	rsp->cpt1_lfs = pfvf->cpt1_lfs;
1790 	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1791 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1792 		rsp->cpt1_lf_msixoff[slot] =
1793 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1800 				   struct free_rsrcs_rsp *rsp)
1801 {
1802 	struct rvu_hwinfo *hw = rvu->hw;
1803 	struct rvu_block *block;
1804 	struct nix_txsch *txsch;
1805 	struct nix_hw *nix_hw;
1806 
1807 	mutex_lock(&rvu->rsrc_lock);
1808 
1809 	block = &hw->block[BLKADDR_NPA];
1810 	rsp->npa = rvu_rsrc_free_count(&block->lf);
1811 
1812 	block = &hw->block[BLKADDR_NIX0];
1813 	rsp->nix = rvu_rsrc_free_count(&block->lf);
1814 
1815 	block = &hw->block[BLKADDR_NIX1];
1816 	rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1817 
1818 	block = &hw->block[BLKADDR_SSO];
1819 	rsp->sso = rvu_rsrc_free_count(&block->lf);
1820 
1821 	block = &hw->block[BLKADDR_SSOW];
1822 	rsp->ssow = rvu_rsrc_free_count(&block->lf);
1823 
1824 	block = &hw->block[BLKADDR_TIM];
1825 	rsp->tim = rvu_rsrc_free_count(&block->lf);
1826 
1827 	block = &hw->block[BLKADDR_CPT0];
1828 	rsp->cpt = rvu_rsrc_free_count(&block->lf);
1829 
1830 	block = &hw->block[BLKADDR_CPT1];
1831 	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1832 
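	/* With a fixed NIX transmit scheduler mapping each PF/VF owns exactly
	 * one schq per level, so report 1 for each level; otherwise report
	 * the free counts from the per-NIX scheduler queue bitmaps.
	 */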
1833 	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1834 		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1835 		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1836 		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1837 		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1838 		/* NIX1 */
1839 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1840 			goto out;
1841 		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1842 		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1843 		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1844 		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1845 	} else {
1846 		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1847 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1848 		rsp->schq[NIX_TXSCH_LVL_SMQ] =
1849 				rvu_rsrc_free_count(&txsch->schq);
1850 
1851 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1852 		rsp->schq[NIX_TXSCH_LVL_TL4] =
1853 				rvu_rsrc_free_count(&txsch->schq);
1854 
1855 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1856 		rsp->schq[NIX_TXSCH_LVL_TL3] =
1857 				rvu_rsrc_free_count(&txsch->schq);
1858 
1859 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1860 		rsp->schq[NIX_TXSCH_LVL_TL2] =
1861 				rvu_rsrc_free_count(&txsch->schq);
1862 
1863 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1864 			goto out;
1865 
1866 		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1867 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1868 		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1869 				rvu_rsrc_free_count(&txsch->schq);
1870 
1871 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1872 		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1873 				rvu_rsrc_free_count(&txsch->schq);
1874 
1875 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1876 		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1877 				rvu_rsrc_free_count(&txsch->schq);
1878 
1879 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1880 		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1881 				rvu_rsrc_free_count(&txsch->schq);
1882 	}
1883 
1884 	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1885 out:
1886 	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1887 	mutex_unlock(&rvu->rsrc_lock);
1888 
1889 	return 0;
1890 }
1891 
1892 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1893 			    struct msg_rsp *rsp)
1894 {
1895 	u16 pcifunc = req->hdr.pcifunc;
1896 	u16 vf, numvfs;
1897 	u64 cfg;
1898 
1899 	vf = pcifunc & RVU_PFVF_FUNC_MASK;
1900 	cfg = rvu_read64(rvu, BLKADDR_RVUM,
1901 			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1902 	numvfs = (cfg >> 12) & 0xFF;
1903 
1904 	if (vf && vf <= numvfs)
1905 		__rvu_flr_handler(rvu, pcifunc);
1906 	else
1907 		return RVU_INVALID_VF_ID;
1908 
1909 	return 0;
1910 }
1911 
1912 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1913 				struct get_hw_cap_rsp *rsp)
1914 {
1915 	struct rvu_hwinfo *hw = rvu->hw;
1916 
1917 	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1918 	rsp->nix_shaping = hw->cap.nix_shaping;
1919 
1920 	return 0;
1921 }
1922 
1923 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
1924 				 struct msg_rsp *rsp)
1925 {
1926 	struct rvu_hwinfo *hw = rvu->hw;
1927 	u16 pcifunc = req->hdr.pcifunc;
1928 	struct rvu_pfvf *pfvf;
1929 	int blkaddr, nixlf;
1930 	u16 target;
1931 
1932 	/* Only PF can add VF permissions */
1933 	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
1934 		return -EOPNOTSUPP;
1935 
1936 	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
1937 	pfvf = rvu_get_pfvf(rvu, target);
1938 
1939 	if (req->flags & RESET_VF_PERM) {
1940 		pfvf->flags &= RVU_CLEAR_VF_PERM;
1941 	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
1942 		 (req->flags & VF_TRUSTED)) {
1943 		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
1944 		/* disable multicast and promisc entries */
1945 		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
1946 			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
1947 			if (blkaddr < 0)
1948 				return 0;
1949 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1950 					   target, 0);
1951 			if (nixlf < 0)
1952 				return 0;
1953 			npc_enadis_default_mce_entry(rvu, target, nixlf,
1954 						     NIXLF_ALLMULTI_ENTRY,
1955 						     false);
1956 			npc_enadis_default_mce_entry(rvu, target, nixlf,
1957 						     NIXLF_PROMISC_ENTRY,
1958 						     false);
1959 		}
1960 	}
1961 
1962 	return 0;
1963 }
1964 
1965 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
1966 				struct mbox_msghdr *req)
1967 {
1968 	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
1969 
	/* Check if the request is valid; if not, reply with an invalid msg */
1971 	if (req->sig != OTX2_MBOX_REQ_SIG)
1972 		goto bad_message;
1973 
1974 	switch (req->id) {
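	/* The MBOX_MESSAGES X-macro below expands to one case per mailbox
	 * message: it allocates a response of the right size, fills in the
	 * common response header and dispatches to the matching
	 * rvu_mbox_handler_<name>() implementation.
	 */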
1975 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
1976 	case _id: {							\
1977 		struct _rsp_type *rsp;					\
1978 		int err;						\
1979 									\
1980 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
1981 			mbox, devid,					\
1982 			sizeof(struct _rsp_type));			\
1983 		/* some handlers should complete even if reply */	\
1984 		/* could not be allocated */				\
1985 		if (!rsp &&						\
1986 		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
1987 		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
1988 		    _id != MBOX_MSG_VF_FLR)				\
1989 			return -ENOMEM;					\
1990 		if (rsp) {						\
1991 			rsp->hdr.id = _id;				\
1992 			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
1993 			rsp->hdr.pcifunc = req->pcifunc;		\
1994 			rsp->hdr.rc = 0;				\
1995 		}							\
1996 									\
1997 		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
1998 						    (struct _req_type *)req, \
1999 						    rsp);		\
2000 		if (rsp && err)						\
2001 			rsp->hdr.rc = err;				\
2002 									\
2003 		trace_otx2_msg_process(mbox->pdev, _id, err);		\
2004 		return rsp ? err : -ENOMEM;				\
2005 	}
2006 MBOX_MESSAGES
2007 #undef M
2008 
2009 bad_message:
2010 	default:
2011 		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2012 		return -ENODEV;
2013 	}
2014 }
2015 
2016 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
2017 {
2018 	struct rvu *rvu = mwork->rvu;
2019 	int offset, err, id, devid;
2020 	struct otx2_mbox_dev *mdev;
2021 	struct mbox_hdr *req_hdr;
2022 	struct mbox_msghdr *msg;
2023 	struct mbox_wq_info *mw;
2024 	struct otx2_mbox *mbox;
2025 
2026 	switch (type) {
2027 	case TYPE_AFPF:
2028 		mw = &rvu->afpf_wq_info;
2029 		break;
2030 	case TYPE_AFVF:
2031 		mw = &rvu->afvf_wq_info;
2032 		break;
2033 	default:
2034 		return;
2035 	}
2036 
	/* The work struct's index in the mbox_wrk array identifies which
	 * PF (TYPE_AFPF) or VF (TYPE_AFVF) raised the mbox interrupt.
	 */
	devid = mwork - mw->mbox_wrk;
2038 	mbox = &mw->mbox;
2039 	mdev = &mbox->dev[devid];
2040 
2041 	/* Process received mbox messages */
2042 	req_hdr = mdev->mbase + mbox->rx_start;
2043 	if (mw->mbox_wrk[devid].num_msgs == 0)
2044 		return;
2045 
2046 	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
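	/* 'offset' now points at the first message in the RX region; each
	 * message's next_msgoff field gives the offset of the one after it.
	 */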
2047 
2048 	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2049 		msg = mdev->mbase + offset;
2050 
2051 		/* Set which PF/VF sent this message based on mbox IRQ */
2052 		switch (type) {
2053 		case TYPE_AFPF:
2054 			msg->pcifunc &=
2055 				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2056 			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2057 			break;
2058 		case TYPE_AFVF:
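			/* AF's VFs belong to PF0 and are numbered from FUNC 1
			 * (FUNC 0 is the PF itself), hence the devid + 1 below.
			 */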
2059 			msg->pcifunc &=
2060 				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2061 			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2062 			break;
2063 		}
2064 
2065 		err = rvu_process_mbox_msg(mbox, devid, msg);
2066 		if (!err) {
2067 			offset = mbox->rx_start + msg->next_msgoff;
2068 			continue;
2069 		}
2070 
2071 		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2072 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2073 				 err, otx2_mbox_id2name(msg->id),
2074 				 msg->id, rvu_get_pf(msg->pcifunc),
2075 				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2076 		else
2077 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2078 				 err, otx2_mbox_id2name(msg->id),
2079 				 msg->id, devid);
2080 	}
2081 	mw->mbox_wrk[devid].num_msgs = 0;
2082 
2083 	/* Send mbox responses to VF/PF */
2084 	otx2_mbox_msg_send(mbox, devid);
2085 }
2086 
2087 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2088 {
2089 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2090 
2091 	__rvu_mbox_handler(mwork, TYPE_AFPF);
2092 }
2093 
2094 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2095 {
2096 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2097 
2098 	__rvu_mbox_handler(mwork, TYPE_AFVF);
2099 }
2100 
2101 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2102 {
2103 	struct rvu *rvu = mwork->rvu;
2104 	struct otx2_mbox_dev *mdev;
2105 	struct mbox_hdr *rsp_hdr;
2106 	struct mbox_msghdr *msg;
2107 	struct mbox_wq_info *mw;
2108 	struct otx2_mbox *mbox;
2109 	int offset, id, devid;
2110 
2111 	switch (type) {
2112 	case TYPE_AFPF:
2113 		mw = &rvu->afpf_wq_info;
2114 		break;
2115 	case TYPE_AFVF:
2116 		mw = &rvu->afvf_wq_info;
2117 		break;
2118 	default:
2119 		return;
2120 	}
2121 
2122 	devid = mwork - mw->mbox_wrk_up;
2123 	mbox = &mw->mbox_up;
2124 	mdev = &mbox->dev[devid];
2125 
2126 	rsp_hdr = mdev->mbase + mbox->rx_start;
2127 	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2128 		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2129 		return;
2130 	}
2131 
2132 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2133 
2134 	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2135 		msg = mdev->mbase + offset;
2136 
2137 		if (msg->id >= MBOX_MSG_MAX) {
2138 			dev_err(rvu->dev,
2139 				"Mbox msg with unknown ID 0x%x\n", msg->id);
2140 			goto end;
2141 		}
2142 
2143 		if (msg->sig != OTX2_MBOX_RSP_SIG) {
2144 			dev_err(rvu->dev,
2145 				"Mbox msg with wrong signature %x, ID 0x%x\n",
2146 				msg->sig, msg->id);
2147 			goto end;
2148 		}
2149 
2150 		switch (msg->id) {
2151 		case MBOX_MSG_CGX_LINK_EVENT:
2152 			break;
2153 		default:
2154 			if (msg->rc)
2155 				dev_err(rvu->dev,
2156 					"Mbox msg response has err %d, ID 0x%x\n",
2157 					msg->rc, msg->id);
2158 			break;
2159 		}
2160 end:
2161 		offset = mbox->rx_start + msg->next_msgoff;
2162 		mdev->msgs_acked++;
2163 	}
2164 	mw->mbox_wrk_up[devid].up_num_msgs = 0;
2165 
2166 	otx2_mbox_reset(mbox, devid);
2167 }
2168 
2169 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2170 {
2171 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2172 
2173 	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
2174 }
2175 
2176 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2177 {
2178 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2179 
2180 	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
2181 }
2182 
2183 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2184 				int num, int type)
2185 {
2186 	struct rvu_hwinfo *hw = rvu->hw;
2187 	int region;
2188 	u64 bar4;
2189 
	/* For the cn10k platform the VF mailbox regions of a PF follow the
	 * PF <-> AF mailbox region, whereas on OcteonTx2 they are read from
	 * the RVU_PF_VF_BAR4_ADDR register.
	 */
2194 	if (type == TYPE_AFVF) {
2195 		for (region = 0; region < num; region++) {
2196 			if (hw->cap.per_pf_mbox_regs) {
2197 				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2198 						  RVU_AF_PFX_BAR4_ADDR(0)) +
2199 						  MBOX_SIZE;
2200 				bar4 += region * MBOX_SIZE;
2201 			} else {
2202 				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2203 				bar4 += region * MBOX_SIZE;
2204 			}
2205 			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2206 			if (!mbox_addr[region])
2207 				goto error;
2208 		}
2209 		return 0;
2210 	}
2211 
	/* For the cn10k platform the AF <-> PF mailbox region of a PF is read
	 * from per-PF registers, whereas on OcteonTx2 it is read from the
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
2216 	for (region = 0; region < num; region++) {
2217 		if (hw->cap.per_pf_mbox_regs) {
2218 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2219 					  RVU_AF_PFX_BAR4_ADDR(region));
2220 		} else {
2221 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2222 					  RVU_AF_PF_BAR4_ADDR);
2223 			bar4 += region * MBOX_SIZE;
2224 		}
2225 		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2226 		if (!mbox_addr[region])
2227 			goto error;
2228 	}
2229 	return 0;
2230 
2231 error:
2232 	while (region--)
2233 		iounmap((void __iomem *)mbox_addr[region]);
2234 	return -ENOMEM;
2235 }
2236 
2237 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2238 			 int type, int num,
2239 			 void (mbox_handler)(struct work_struct *),
2240 			 void (mbox_up_handler)(struct work_struct *))
2241 {
2242 	int err = -EINVAL, i, dir, dir_up;
2243 	void __iomem *reg_base;
2244 	struct rvu_work *mwork;
2245 	void **mbox_regions;
2246 	const char *name;
2247 
2248 	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2249 	if (!mbox_regions)
2250 		return -ENOMEM;
2251 
2252 	switch (type) {
2253 	case TYPE_AFPF:
2254 		name = "rvu_afpf_mailbox";
2255 		dir = MBOX_DIR_AFPF;
2256 		dir_up = MBOX_DIR_AFPF_UP;
2257 		reg_base = rvu->afreg_base;
2258 		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2259 		if (err)
2260 			goto free_regions;
2261 		break;
2262 	case TYPE_AFVF:
2263 		name = "rvu_afvf_mailbox";
2264 		dir = MBOX_DIR_PFVF;
2265 		dir_up = MBOX_DIR_PFVF_UP;
2266 		reg_base = rvu->pfreg_base;
2267 		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2268 		if (err)
2269 			goto free_regions;
2270 		break;
	default:
		goto free_regions;
2273 	}
2274 
2275 	mw->mbox_wq = alloc_workqueue(name,
2276 				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2277 				      num);
2278 	if (!mw->mbox_wq) {
2279 		err = -ENOMEM;
2280 		goto unmap_regions;
2281 	}
2282 
2283 	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2284 				    sizeof(struct rvu_work), GFP_KERNEL);
2285 	if (!mw->mbox_wrk) {
2286 		err = -ENOMEM;
2287 		goto exit;
2288 	}
2289 
2290 	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2291 				       sizeof(struct rvu_work), GFP_KERNEL);
2292 	if (!mw->mbox_wrk_up) {
2293 		err = -ENOMEM;
2294 		goto exit;
2295 	}
2296 
2297 	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2298 				     reg_base, dir, num);
2299 	if (err)
2300 		goto exit;
2301 
2302 	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2303 				     reg_base, dir_up, num);
2304 	if (err)
2305 		goto exit;
2306 
2307 	for (i = 0; i < num; i++) {
2308 		mwork = &mw->mbox_wrk[i];
2309 		mwork->rvu = rvu;
2310 		INIT_WORK(&mwork->work, mbox_handler);
2311 
2312 		mwork = &mw->mbox_wrk_up[i];
2313 		mwork->rvu = rvu;
2314 		INIT_WORK(&mwork->work, mbox_up_handler);
2315 	}
2316 	kfree(mbox_regions);
2317 	return 0;
2318 
2319 exit:
2320 	destroy_workqueue(mw->mbox_wq);
2321 unmap_regions:
2322 	while (num--)
2323 		iounmap((void __iomem *)mbox_regions[num]);
2324 free_regions:
2325 	kfree(mbox_regions);
2326 	return err;
2327 }
2328 
2329 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2330 {
2331 	struct otx2_mbox *mbox = &mw->mbox;
2332 	struct otx2_mbox_dev *mdev;
2333 	int devid;
2334 
2335 	if (mw->mbox_wq) {
2336 		flush_workqueue(mw->mbox_wq);
2337 		destroy_workqueue(mw->mbox_wq);
2338 		mw->mbox_wq = NULL;
2339 	}
2340 
2341 	for (devid = 0; devid < mbox->ndevs; devid++) {
2342 		mdev = &mbox->dev[devid];
2343 		if (mdev->hwbase)
2344 			iounmap((void __iomem *)mdev->hwbase);
2345 	}
2346 
2347 	otx2_mbox_destroy(&mw->mbox);
2348 	otx2_mbox_destroy(&mw->mbox_up);
2349 }
2350 
2351 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2352 			   int mdevs, u64 intr)
2353 {
2354 	struct otx2_mbox_dev *mdev;
2355 	struct otx2_mbox *mbox;
2356 	struct mbox_hdr *hdr;
2357 	int i;
2358 
2359 	for (i = first; i < mdevs; i++) {
		/* Interrupt bits are numbered from 0 relative to 'first' */
2361 		if (!(intr & BIT_ULL(i - first)))
2362 			continue;
2363 
2364 		mbox = &mw->mbox;
2365 		mdev = &mbox->dev[i];
2366 		hdr = mdev->mbase + mbox->rx_start;
2367 
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler,
		 * pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
2375 
2376 		if (hdr->num_msgs) {
2377 			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2378 			hdr->num_msgs = 0;
2379 			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2380 		}
2381 		mbox = &mw->mbox_up;
2382 		mdev = &mbox->dev[i];
2383 		hdr = mdev->mbase + mbox->rx_start;
2384 		if (hdr->num_msgs) {
2385 			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2386 			hdr->num_msgs = 0;
2387 			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2388 		}
2389 	}
2390 }
2391 
2392 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2393 {
2394 	struct rvu *rvu = (struct rvu *)rvu_irq;
2395 	int vfs = rvu->vfs;
2396 	u64 intr;
2397 
2398 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2399 	/* Clear interrupts */
2400 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2401 	if (intr)
2402 		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2403 
2404 	/* Sync with mbox memory region */
2405 	rmb();
2406 
2407 	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2408 
	/* Handle VF interrupts.
	 * RVU_PF_VFPF_MBOX_INTX(0) covers VFs 0..63 and INTX(1) covers
	 * VFs 64..127, hence the split handling below.
	 */
2410 	if (vfs > 64) {
2411 		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2412 		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2413 
2414 		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2415 		vfs -= 64;
2416 	}
2417 
2418 	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2419 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2420 	if (intr)
2421 		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2422 
2423 	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2424 
2425 	return IRQ_HANDLED;
2426 }
2427 
2428 static void rvu_enable_mbox_intr(struct rvu *rvu)
2429 {
2430 	struct rvu_hwinfo *hw = rvu->hw;
2431 
2432 	/* Clear spurious irqs, if any */
2433 	rvu_write64(rvu, BLKADDR_RVUM,
2434 		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2435 
	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2437 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2438 		    INTR_MASK(hw->total_pfs) & ~1ULL);
2439 }
2440 
2441 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2442 {
2443 	struct rvu_block *block;
2444 	int slot, lf, num_lfs;
2445 	int err;
2446 
2447 	block = &rvu->hw->block[blkaddr];
2448 	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2449 					block->addr);
2450 	if (!num_lfs)
2451 		return;
2452 	for (slot = 0; slot < num_lfs; slot++) {
2453 		lf = rvu_get_lf(rvu, block, pcifunc, slot);
2454 		if (lf < 0)
2455 			continue;
2456 
2457 		/* Cleanup LF and reset it */
2458 		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2459 			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2460 		else if (block->addr == BLKADDR_NPA)
2461 			rvu_npa_lf_teardown(rvu, pcifunc, lf);
2462 		else if ((block->addr == BLKADDR_CPT0) ||
2463 			 (block->addr == BLKADDR_CPT1))
2464 			rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
2465 
2466 		err = rvu_lf_reset(rvu, block, lf);
2467 		if (err) {
2468 			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2469 				block->addr, lf);
2470 		}
2471 	}
2472 }
2473 
2474 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2475 {
2476 	mutex_lock(&rvu->flr_lock);
2477 	/* Reset order should reflect inter-block dependencies:
2478 	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2479 	 * 2. Flush and reset SSO/SSOW
2480 	 * 3. Cleanup pools (NPA)
2481 	 */
2482 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2483 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2484 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2485 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2486 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2487 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2488 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2489 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2490 	rvu_reset_lmt_map_tbl(rvu, pcifunc);
2491 	rvu_detach_rsrcs(rvu, NULL, pcifunc);
2492 	mutex_unlock(&rvu->flr_lock);
2493 }
2494 
2495 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2496 {
2497 	int reg = 0;
2498 
2499 	/* pcifunc = 0(PF0) | (vf + 1) */
2500 	__rvu_flr_handler(rvu, vf + 1);
2501 
2502 	if (vf >= 64) {
2503 		reg = 1;
2504 		vf = vf - 64;
2505 	}
2506 
2507 	/* Signal FLR finish and enable IRQ */
2508 	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2509 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2510 }
2511 
2512 static void rvu_flr_handler(struct work_struct *work)
2513 {
2514 	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2515 	struct rvu *rvu = flrwork->rvu;
2516 	u16 pcifunc, numvfs, vf;
2517 	u64 cfg;
2518 	int pf;
2519 
2520 	pf = flrwork - rvu->flr_wrk;
2521 	if (pf >= rvu->hw->total_pfs) {
2522 		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2523 		return;
2524 	}
2525 
2526 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2527 	numvfs = (cfg >> 12) & 0xFF;
2528 	pcifunc  = pf << RVU_PFVF_PF_SHIFT;
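	/* Tear down each of the PF's VFs first (FUNC = vf + 1), then the
	 * PF itself (FUNC = 0).
	 */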
2529 
2530 	for (vf = 0; vf < numvfs; vf++)
2531 		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2532 
2533 	__rvu_flr_handler(rvu, pcifunc);
2534 
2535 	/* Signal FLR finish */
2536 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2537 
2538 	/* Enable interrupt */
2539 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2540 }
2541 
2542 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2543 {
2544 	int dev, vf, reg = 0;
2545 	u64 intr;
2546 
	/* VFs 0..63 use FLR/TRPEND register set 0, VFs 64..127 use set 1 */
	if (start_vf >= 64)
		reg = 1;
2549 
2550 	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2551 	if (!intr)
2552 		return;
2553 
2554 	for (vf = 0; vf < numvfs; vf++) {
2555 		if (!(intr & BIT_ULL(vf)))
2556 			continue;
2557 		/* Clear and disable the interrupt */
2558 		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2559 		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2560 
		/* flr_wrk[] holds one entry per PF followed by one per AF VF */
		dev = vf + start_vf + rvu->hw->total_pfs;
2562 		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2563 	}
2564 }
2565 
2566 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2567 {
2568 	struct rvu *rvu = (struct rvu *)rvu_irq;
2569 	u64 intr;
2570 	u8  pf;
2571 
2572 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2573 	if (!intr)
2574 		goto afvf_flr;
2575 
2576 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2577 		if (intr & (1ULL << pf)) {
2578 			/* clear interrupt */
2579 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2580 				    BIT_ULL(pf));
2581 			/* Disable the interrupt */
2582 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2583 				    BIT_ULL(pf));
			/* PF is already dead, do only AF related operations */
2585 			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2586 		}
2587 	}
2588 
2589 afvf_flr:
2590 	rvu_afvf_queue_flr_work(rvu, 0, 64);
2591 	if (rvu->vfs > 64)
2592 		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2593 
2594 	return IRQ_HANDLED;
2595 }
2596 
2597 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2598 {
2599 	int vf;
2600 
2601 	/* Nothing to be done here other than clearing the
2602 	 * TRPEND bit.
2603 	 */
2604 	for (vf = 0; vf < 64; vf++) {
2605 		if (intr & (1ULL << vf)) {
2606 			/* clear the trpend due to ME(master enable) */
2607 			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2608 			/* clear interrupt */
2609 			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2610 		}
2611 	}
2612 }
2613 
2614 /* Handles ME interrupts from VFs of AF */
2615 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2616 {
2617 	struct rvu *rvu = (struct rvu *)rvu_irq;
2618 	int vfset;
2619 	u64 intr;
2620 
2621 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2622 
2623 	for (vfset = 0; vfset <= 1; vfset++) {
2624 		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2625 		if (intr)
2626 			rvu_me_handle_vfset(rvu, vfset, intr);
2627 	}
2628 
2629 	return IRQ_HANDLED;
2630 }
2631 
2632 /* Handles ME interrupts from PFs */
2633 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2634 {
2635 	struct rvu *rvu = (struct rvu *)rvu_irq;
2636 	u64 intr;
2637 	u8  pf;
2638 
2639 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2640 
2641 	/* Nothing to be done here other than clearing the
2642 	 * TRPEND bit.
2643 	 */
2644 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2645 		if (intr & (1ULL << pf)) {
2646 			/* clear the trpend due to ME(master enable) */
2647 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2648 				    BIT_ULL(pf));
2649 			/* clear interrupt */
2650 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2651 				    BIT_ULL(pf));
2652 		}
2653 	}
2654 
2655 	return IRQ_HANDLED;
2656 }
2657 
2658 static void rvu_unregister_interrupts(struct rvu *rvu)
2659 {
2660 	int irq;
2661 
2662 	/* Disable the Mbox interrupt */
2663 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2664 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2665 
2666 	/* Disable the PF FLR interrupt */
2667 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2668 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2669 
2670 	/* Disable the PF ME interrupt */
2671 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2672 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2673 
2674 	for (irq = 0; irq < rvu->num_vec; irq++) {
2675 		if (rvu->irq_allocated[irq]) {
2676 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2677 			rvu->irq_allocated[irq] = false;
2678 		}
2679 	}
2680 
2681 	pci_free_irq_vectors(rvu->pdev);
2682 	rvu->num_vec = 0;
2683 }
2684 
2685 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2686 {
2687 	struct rvu_pfvf *pfvf = &rvu->pf[0];
2688 	int offset;
2689 
2691 	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2692 
	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. An offset equal to zero means
	 * that the PF vectors are not configured and overlap the AF vectors.
	 */
2697 	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2698 	       offset;
2699 }
2700 
2701 static int rvu_register_interrupts(struct rvu *rvu)
2702 {
2703 	int ret, offset, pf_vec_start;
2704 
2705 	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2706 
2707 	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2708 					   NAME_SIZE, GFP_KERNEL);
2709 	if (!rvu->irq_name)
2710 		return -ENOMEM;
2711 
2712 	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2713 					  sizeof(bool), GFP_KERNEL);
2714 	if (!rvu->irq_allocated)
2715 		return -ENOMEM;
2716 
2717 	/* Enable MSI-X */
2718 	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2719 				    rvu->num_vec, PCI_IRQ_MSIX);
2720 	if (ret < 0) {
2721 		dev_err(rvu->dev,
2722 			"RVUAF: Request for %d msix vectors failed, ret %d\n",
2723 			rvu->num_vec, ret);
2724 		return ret;
2725 	}
2726 
2727 	/* Register mailbox interrupt handler */
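	/* rvu->irq_name is a flat array of NAME_SIZE-byte name slots, one
	 * per MSIX vector, hence the (vector * NAME_SIZE) indexing below.
	 */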
2728 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2729 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2730 			  rvu_mbox_intr_handler, 0,
2731 			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2732 	if (ret) {
2733 		dev_err(rvu->dev,
2734 			"RVUAF: IRQ registration failed for mbox irq\n");
2735 		goto fail;
2736 	}
2737 
2738 	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2739 
2740 	/* Enable mailbox interrupts from all PFs */
2741 	rvu_enable_mbox_intr(rvu);
2742 
2743 	/* Register FLR interrupt handler */
2744 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2745 		"RVUAF FLR");
2746 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2747 			  rvu_flr_intr_handler, 0,
2748 			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2749 			  rvu);
2750 	if (ret) {
2751 		dev_err(rvu->dev,
2752 			"RVUAF: IRQ registration failed for FLR\n");
2753 		goto fail;
2754 	}
2755 	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2756 
	/* Clear pending FLR interrupts and enable them for all PFs */
2758 	rvu_write64(rvu, BLKADDR_RVUM,
2759 		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2760 
2761 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2762 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2763 
2764 	/* Register ME interrupt handler */
2765 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2766 		"RVUAF ME");
2767 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2768 			  rvu_me_pf_intr_handler, 0,
2769 			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2770 			  rvu);
2771 	if (ret) {
2772 		dev_err(rvu->dev,
2773 			"RVUAF: IRQ registration failed for ME\n");
2774 	}
2775 	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2776 
	/* Clear TRPEND bit for all PFs */
2778 	rvu_write64(rvu, BLKADDR_RVUM,
2779 		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Clear pending ME interrupts and enable them for all PFs */
2781 	rvu_write64(rvu, BLKADDR_RVUM,
2782 		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2783 
2784 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2785 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2786 
2787 	if (!rvu_afvf_msix_vectors_num_ok(rvu))
2788 		return 0;
2789 
2790 	/* Get PF MSIX vectors offset. */
2791 	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2792 				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
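	/* The AF VF (PF0 VF) mailbox, FLR and ME vectors live at
	 * pf_vec_start + RVU_PF_INT_VEC_* within the AF's MSIX table.
	 */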
2793 
2794 	/* Register MBOX0 interrupt. */
2795 	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2796 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2797 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2798 			  rvu_mbox_intr_handler, 0,
2799 			  &rvu->irq_name[offset * NAME_SIZE],
2800 			  rvu);
2801 	if (ret)
2802 		dev_err(rvu->dev,
2803 			"RVUAF: IRQ registration failed for Mbox0\n");
2804 
2805 	rvu->irq_allocated[offset] = true;
2806 
	/* Register MBOX1 interrupt. The MBOX1 IRQ vector immediately
	 * follows MBOX0, so it is the next vector offset.
	 */
2810 	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2811 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2812 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2813 			  rvu_mbox_intr_handler, 0,
2814 			  &rvu->irq_name[offset * NAME_SIZE],
2815 			  rvu);
2816 	if (ret)
2817 		dev_err(rvu->dev,
2818 			"RVUAF: IRQ registration failed for Mbox1\n");
2819 
2820 	rvu->irq_allocated[offset] = true;
2821 
2822 	/* Register FLR interrupt handler for AF's VFs */
2823 	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2824 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2825 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2826 			  rvu_flr_intr_handler, 0,
2827 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
2828 	if (ret) {
2829 		dev_err(rvu->dev,
2830 			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2831 		goto fail;
2832 	}
2833 	rvu->irq_allocated[offset] = true;
2834 
2835 	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2836 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2837 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2838 			  rvu_flr_intr_handler, 0,
2839 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
2840 	if (ret) {
2841 		dev_err(rvu->dev,
2842 			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2843 		goto fail;
2844 	}
2845 	rvu->irq_allocated[offset] = true;
2846 
2847 	/* Register ME interrupt handler for AF's VFs */
2848 	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2849 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2850 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2851 			  rvu_me_vf_intr_handler, 0,
2852 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
2853 	if (ret) {
2854 		dev_err(rvu->dev,
2855 			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2856 		goto fail;
2857 	}
2858 	rvu->irq_allocated[offset] = true;
2859 
2860 	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2861 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2862 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2863 			  rvu_me_vf_intr_handler, 0,
2864 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
2865 	if (ret) {
2866 		dev_err(rvu->dev,
2867 			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2868 		goto fail;
2869 	}
2870 	rvu->irq_allocated[offset] = true;
2871 	return 0;
2872 
2873 fail:
2874 	rvu_unregister_interrupts(rvu);
2875 	return ret;
2876 }
2877 
2878 static void rvu_flr_wq_destroy(struct rvu *rvu)
2879 {
2880 	if (rvu->flr_wq) {
2881 		flush_workqueue(rvu->flr_wq);
2882 		destroy_workqueue(rvu->flr_wq);
2883 		rvu->flr_wq = NULL;
2884 	}
2885 }
2886 
2887 static int rvu_flr_init(struct rvu *rvu)
2888 {
2889 	int dev, num_devs;
2890 	u64 cfg;
2891 	int pf;
2892 
	/* Enable FLR for all PFs */
2894 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2895 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2896 		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2897 			    cfg | BIT_ULL(22));
2898 	}
2899 
	/* max_active = 1, so FLR work items are processed one at a time */
	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
2903 	if (!rvu->flr_wq)
2904 		return -ENOMEM;
2905 
2906 	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2907 	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2908 				    sizeof(struct rvu_work), GFP_KERNEL);
2909 	if (!rvu->flr_wrk) {
2910 		destroy_workqueue(rvu->flr_wq);
2911 		return -ENOMEM;
2912 	}
2913 
2914 	for (dev = 0; dev < num_devs; dev++) {
2915 		rvu->flr_wrk[dev].rvu = rvu;
2916 		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2917 	}
2918 
2919 	mutex_init(&rvu->flr_lock);
2920 
2921 	return 0;
2922 }
2923 
2924 static void rvu_disable_afvf_intr(struct rvu *rvu)
2925 {
2926 	int vfs = rvu->vfs;
2927 
2928 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2929 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2930 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2931 	if (vfs <= 64)
2932 		return;
2933 
2934 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2935 		      INTR_MASK(vfs - 64));
2936 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2937 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2938 }
2939 
2940 static void rvu_enable_afvf_intr(struct rvu *rvu)
2941 {
2942 	int vfs = rvu->vfs;
2943 
2944 	/* Clear any pending interrupts and enable AF VF interrupts for
2945 	 * the first 64 VFs.
2946 	 */
2947 	/* Mbox */
2948 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2949 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2950 
2951 	/* FLR */
2952 	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2953 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2954 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2955 
2956 	/* Same for remaining VFs, if any. */
2957 	if (vfs <= 64)
2958 		return;
2959 
2960 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2961 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2962 		      INTR_MASK(vfs - 64));
2963 
2964 	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2965 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2966 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2967 }
2968 
2969 int rvu_get_num_lbk_chans(void)
2970 {
2971 	struct pci_dev *pdev;
2972 	void __iomem *base;
2973 	int ret = -EIO;
2974 
2975 	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
2976 			      NULL);
2977 	if (!pdev)
2978 		goto err;
2979 
2980 	base = pci_ioremap_bar(pdev, 0);
2981 	if (!base)
2982 		goto err_put;
2983 
2984 	/* Read number of available LBK channels from LBK(0)_CONST register. */
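	/* A sketch of the assumed LBK(0)_CONST layout: the register sits at
	 * offset 0x10 of BAR0 and the channel count occupies bits [47:32].
	 */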
2985 	ret = (readq(base + 0x10) >> 32) & 0xffff;
2986 	iounmap(base);
2987 err_put:
2988 	pci_dev_put(pdev);
2989 err:
2990 	return ret;
2991 }
2992 
2993 static int rvu_enable_sriov(struct rvu *rvu)
2994 {
2995 	struct pci_dev *pdev = rvu->pdev;
2996 	int err, chans, vfs;
2997 
2998 	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
2999 		dev_warn(&pdev->dev,
3000 			 "Skipping SRIOV enablement since not enough IRQs are available\n");
3001 		return 0;
3002 	}
3003 
3004 	chans = rvu_get_num_lbk_chans();
3005 	if (chans < 0)
3006 		return chans;
3007 
3008 	vfs = pci_sriov_get_totalvfs(pdev);
3009 
3010 	/* Limit VFs in case we have more VFs than LBK channels available. */
3011 	if (vfs > chans)
3012 		vfs = chans;
3013 
3014 	if (!vfs)
3015 		return 0;
3016 
	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit LBK pairs to 62.
	 */
3020 	if (vfs > 62)
3021 		vfs = 62;
3022 
	/* Save the number of VFs for use by the VF interrupt handlers.
	 * Interrupts might start arriving during SRIOV enablement, so the
	 * ordinary API cannot be used to get the number of enabled VFs.
	 */
3027 	rvu->vfs = vfs;
3028 
3029 	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
3030 			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
3031 	if (err)
3032 		return err;
3033 
3034 	rvu_enable_afvf_intr(rvu);
3035 	/* Make sure IRQs are enabled before SRIOV. */
3036 	mb();
3037 
3038 	err = pci_enable_sriov(pdev, vfs);
3039 	if (err) {
3040 		rvu_disable_afvf_intr(rvu);
3041 		rvu_mbox_destroy(&rvu->afvf_wq_info);
3042 		return err;
3043 	}
3044 
3045 	return 0;
3046 }
3047 
3048 static void rvu_disable_sriov(struct rvu *rvu)
3049 {
3050 	rvu_disable_afvf_intr(rvu);
3051 	rvu_mbox_destroy(&rvu->afvf_wq_info);
3052 	pci_disable_sriov(rvu->pdev);
3053 }
3054 
3055 static void rvu_update_module_params(struct rvu *rvu)
3056 {
3057 	const char *default_pfl_name = "default";
3058 
3059 	strscpy(rvu->mkex_pfl_name,
3060 		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
3061 	strscpy(rvu->kpu_pfl_name,
3062 		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
3063 }
3064 
3065 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3066 {
3067 	struct device *dev = &pdev->dev;
3068 	struct rvu *rvu;
3069 	int    err;
3070 
3071 	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
3072 	if (!rvu)
3073 		return -ENOMEM;
3074 
3075 	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
3076 	if (!rvu->hw) {
3077 		devm_kfree(dev, rvu);
3078 		return -ENOMEM;
3079 	}
3080 
3081 	pci_set_drvdata(pdev, rvu);
3082 	rvu->pdev = pdev;
3083 	rvu->dev = &pdev->dev;
3084 
3085 	err = pci_enable_device(pdev);
3086 	if (err) {
3087 		dev_err(dev, "Failed to enable PCI device\n");
3088 		goto err_freemem;
3089 	}
3090 
3091 	err = pci_request_regions(pdev, DRV_NAME);
3092 	if (err) {
3093 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
3094 		goto err_disable_device;
3095 	}
3096 
3097 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3098 	if (err) {
3099 		dev_err(dev, "DMA mask config failed, abort\n");
3100 		goto err_release_regions;
3101 	}
3102 
3103 	pci_set_master(pdev);
3104 
3105 	rvu->ptp = ptp_get();
3106 	if (IS_ERR(rvu->ptp)) {
3107 		err = PTR_ERR(rvu->ptp);
3108 		if (err == -EPROBE_DEFER)
3109 			goto err_release_regions;
3110 		rvu->ptp = NULL;
3111 	}
3112 
3113 	/* Map Admin function CSRs */
3114 	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
3115 	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
3116 	if (!rvu->afreg_base || !rvu->pfreg_base) {
3117 		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
3118 		err = -ENOMEM;
3119 		goto err_put_ptp;
3120 	}
3121 
3122 	/* Store module params in rvu structure */
3123 	rvu_update_module_params(rvu);
3124 
3125 	/* Check which blocks the HW supports */
3126 	rvu_check_block_implemented(rvu);
3127 
3128 	rvu_reset_all_blocks(rvu);
3129 
3130 	rvu_setup_hw_capabilities(rvu);
3131 
3132 	err = rvu_setup_hw_resources(rvu);
3133 	if (err)
3134 		goto err_put_ptp;
3135 
3136 	/* Init mailbox btw AF and PFs */
3137 	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
3138 			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
3139 			    rvu_afpf_mbox_up_handler);
3140 	if (err) {
3141 		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
3142 		goto err_hwsetup;
3143 	}
3144 
3145 	err = rvu_flr_init(rvu);
3146 	if (err) {
3147 		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
3148 		goto err_mbox;
3149 	}
3150 
3151 	err = rvu_register_interrupts(rvu);
3152 	if (err) {
3153 		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
3154 		goto err_flr;
3155 	}
3156 
3157 	err = rvu_register_dl(rvu);
3158 	if (err) {
3159 		dev_err(dev, "%s: Failed to register devlink\n", __func__);
3160 		goto err_irq;
3161 	}
3162 
3163 	rvu_setup_rvum_blk_revid(rvu);
3164 
3165 	/* Enable AF's VFs (if any) */
3166 	err = rvu_enable_sriov(rvu);
3167 	if (err) {
3168 		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
3169 		goto err_dl;
3170 	}
3171 
3172 	/* Initialize debugfs */
3173 	rvu_dbg_init(rvu);
3174 
3175 	mutex_init(&rvu->rswitch.switch_lock);
3176 
3177 	return 0;
3178 err_dl:
3179 	rvu_unregister_dl(rvu);
3180 err_irq:
3181 	rvu_unregister_interrupts(rvu);
3182 err_flr:
3183 	rvu_flr_wq_destroy(rvu);
3184 err_mbox:
3185 	rvu_mbox_destroy(&rvu->afpf_wq_info);
3186 err_hwsetup:
3187 	rvu_cgx_exit(rvu);
3188 	rvu_fwdata_exit(rvu);
3189 	rvu_reset_all_blocks(rvu);
3190 	rvu_free_hw_resources(rvu);
3191 	rvu_clear_rvum_blk_revid(rvu);
3192 err_put_ptp:
3193 	ptp_put(rvu->ptp);
3194 err_release_regions:
3195 	pci_release_regions(pdev);
3196 err_disable_device:
3197 	pci_disable_device(pdev);
3198 err_freemem:
3199 	pci_set_drvdata(pdev, NULL);
3200 	devm_kfree(&pdev->dev, rvu->hw);
3201 	devm_kfree(dev, rvu);
3202 	return err;
3203 }
3204 
3205 static void rvu_remove(struct pci_dev *pdev)
3206 {
3207 	struct rvu *rvu = pci_get_drvdata(pdev);
3208 
3209 	rvu_dbg_exit(rvu);
3210 	rvu_unregister_dl(rvu);
3211 	rvu_unregister_interrupts(rvu);
3212 	rvu_flr_wq_destroy(rvu);
3213 	rvu_cgx_exit(rvu);
3214 	rvu_fwdata_exit(rvu);
3215 	rvu_mbox_destroy(&rvu->afpf_wq_info);
3216 	rvu_disable_sriov(rvu);
3217 	rvu_reset_all_blocks(rvu);
3218 	rvu_free_hw_resources(rvu);
3219 	rvu_clear_rvum_blk_revid(rvu);
3220 	ptp_put(rvu->ptp);
3221 	pci_release_regions(pdev);
3222 	pci_disable_device(pdev);
3223 	pci_set_drvdata(pdev, NULL);
3224 
3225 	devm_kfree(&pdev->dev, rvu->hw);
3226 	devm_kfree(&pdev->dev, rvu);
3227 }
3228 
3229 static struct pci_driver rvu_driver = {
3230 	.name = DRV_NAME,
3231 	.id_table = rvu_id_table,
3232 	.probe = rvu_probe,
3233 	.remove = rvu_remove,
3234 };
3235 
3236 static int __init rvu_init_module(void)
3237 {
3238 	int err;
3239 
3240 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3241 
3242 	err = pci_register_driver(&cgx_driver);
3243 	if (err < 0)
3244 		return err;
3245 
3246 	err = pci_register_driver(&ptp_driver);
3247 	if (err < 0)
3248 		goto ptp_err;
3249 
3250 	err =  pci_register_driver(&rvu_driver);
3251 	if (err < 0)
3252 		goto rvu_err;
3253 
3254 	return 0;
3255 rvu_err:
3256 	pci_unregister_driver(&ptp_driver);
3257 ptp_err:
3258 	pci_unregister_driver(&cgx_driver);
3259 
3260 	return err;
3261 }
3262 
3263 static void __exit rvu_cleanup_module(void)
3264 {
3265 	pci_unregister_driver(&rvu_driver);
3266 	pci_unregister_driver(&ptp_driver);
3267 	pci_unregister_driver(&cgx_driver);
3268 }
3269 
3270 module_init(rvu_init_module);
3271 module_exit(rvu_cleanup_module);
3272