// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function Devlink
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>

#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"

#define DRV_NAME "octeontx2-af"

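/* Helpers to open and close a named object in a devlink fmsg report:
 * a pair nest wrapping an object nest.
 */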
static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	int err;

	err = devlink_fmsg_pair_nest_start(fmsg, name);
	if (err)
		return err;

	return devlink_fmsg_obj_nest_start(fmsg);
}

static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	int err;

	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;

	return devlink_fmsg_pair_nest_end(fmsg);
}

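/* Format the vector's name into its slot of rvu->irq_name and request the
 * IRQ with the devlink private data as dev_id. Returns true only if the
 * vector was successfully allocated.
 */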
static bool rvu_common_request_irq(struct rvu *rvu, int offset,
				   const char *name, irq_handler_t fn)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int rc;

	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
	if (rc)
		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
	else
		rvu->irq_allocated[offset] = true;

	return rvu->irq_allocated[offset];
}

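/* NIX AF error reporting: the hard-IRQ handlers below only latch the
 * interrupt cause in the event context and mask the source; the actual
 * devlink_health_report() call is deferred to the devlink workqueue since
 * it runs in process context.
 */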
static void rvu_nix_intr_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
			      "NIX_AF_RVU Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}

static void rvu_nix_gen_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
			      "NIX_AF_GEN Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}

static void rvu_nix_err_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
			      "NIX_AF_ERR Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}

static void rvu_nix_ras_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
			      "NIX_AF_RAS Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
	nix_event_context->nix_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);

	return IRQ_HANDLED;
}

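/* Mask the NIX AF interrupt sources and free any vectors that were
 * requested for them.
 */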
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}

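/* NIX AF MSI-X vectors start at the offset programmed in
 * NIX_PRIV_AF_INT_CFG; request the RVU, GEN, ERR and RAS (poison) vectors
 * and unmask their sources.
 */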
static int rvu_nix_register_interrupts(struct rvu *rvu)
{
	int blkaddr, base;
	bool rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	/* Get NIX AF MSIX vectors offset. */
	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!base) {
		dev_warn(rvu->dev,
			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
			 blkaddr - BLKADDR_NIX0);
		return 0;
	}
	/* Register and enable NIX_AF_RVU_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
				    "NIX_AF_RVU_INT",
				    rvu_nix_af_rvu_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_GEN_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
				    "NIX_AF_GEN_INT",
				    rvu_nix_af_rvu_gen_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_ERR_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
				    "NIX_AF_ERR_INT",
				    rvu_nix_af_rvu_err_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_RAS interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
				    "NIX_AF_RAS",
				    rvu_nix_af_rvu_ras_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
err:
	rvu_nix_unregister_interrupts(rvu);
	return rc;
}

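/* Decode a latched NIX interrupt word into a devlink fmsg report: one named
 * pair per reporter type, with a line for every cause bit that is set.
 */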
static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum nix_af_rvu_health health_reporter)
{
	struct rvu_nix_event_ctx *nix_event_context;
	u64 intr_val;
	int err;

	nix_event_context = ctx;
	switch (health_reporter) {
	case NIX_AF_RVU_INTR:
		intr_val = nix_event_context->nix_af_rvu_int;
		err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
						nix_event_context->nix_af_rvu_int);
		if (err)
			return err;
		if (intr_val & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NIX_AF_RVU_GEN:
		intr_val = nix_event_context->nix_af_rvu_gen;
		err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
						nix_event_context->nix_af_rvu_gen);
		if (err)
			return err;
		if (intr_val & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(1)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(4)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NIX_AF_RVU_ERR:
		intr_val = nix_event_context->nix_af_rvu_err;
		err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
						nix_event_context->nix_af_rvu_err);
		if (err)
			return err;
		if (intr_val & BIT_ULL(14)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(13)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(12)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(6)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(5)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(4)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(3)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(2)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(1)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NIX_AF_RVU_RAS:
		intr_val = nix_event_context->nix_af_rvu_ras;
		err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
						nix_event_context->nix_af_rvu_ras);
		if (err)
			return err;
		err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
		if (err)
			return err;
		if (intr_val & BIT_ULL(34)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(33)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(4)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(3)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(2)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(1)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
			if (err)
				return err;
		}
		if (intr_val & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

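/* Reporter dump callbacks: ctx is non-NULL when the dump is generated from
 * devlink_health_report(); for a user-requested dump ctx is NULL and the
 * last saved event context is decoded instead.
 */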
static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
				struct devlink_fmsg *fmsg, void *ctx,
				struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
}

static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_int)
		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
}

static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
}

static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_err)
		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
}

static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_ras)
		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
}

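/* RVU_REPORTERS() (defined in the driver's devlink header) expands to the
 * rvu_<name>_reporter_ops instances used below, wiring up each reporter's
 * _dump and _recover callbacks.
 */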
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);

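/* Allocate the NIX event context, create the four NIX health reporters and
 * the workqueue used to defer reporting from IRQ context.
 */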
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		goto err;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
err:
	rvu_nix_health_reporters_destroy(rvu_dl);
	return -ENOMEM;
}

static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_nix_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create nix reporter, err=%d\n",
			 err);
		return err;
	}
	rvu_nix_register_interrupts(rvu);

	return 0;
}

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}

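/* NPA AF error reporting mirrors the NIX flow above: latch the cause in
 * hard-IRQ context, defer the health report to the workqueue, and decode
 * the saved value in the dump callbacks.
 */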
static void rvu_npa_intr_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
			      "NPA_AF_RVU Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}

static void rvu_npa_gen_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
			      "NPA_AF_GEN Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}

static void rvu_npa_err_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
			      "NPA_AF_ERR Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}

static void rvu_npa_ras_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
			      "HW NPA_AF_RAS Error reported",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}

static void rvu_npa_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int i, offs, blkaddr;
	u64 reg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
	offs = reg & 0x3FF;

	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);

	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}

static int rvu_npa_register_interrupts(struct rvu *rvu)
{
	int blkaddr, base;
	bool rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	/* Get NPA AF MSIX vectors offset. */
	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
	if (!base) {
		dev_warn(rvu->dev,
			 "Failed to get NPA_AF_INT vector offsets\n");
		return 0;
	}

	/* Register and enable NPA_AF_RVU_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
				    "NPA_AF_RVU_INT",
				    rvu_npa_af_rvu_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_GEN_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
				    "NPA_AF_RVU_GEN",
				    rvu_npa_af_gen_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_ERR_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
				    "NPA_AF_ERR_INT",
				    rvu_npa_af_err_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_RAS interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
				    "NPA_AF_RAS",
				    rvu_npa_af_ras_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
err:
	rvu_npa_unregister_interrupts(rvu);
	return rc;
}

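/* Decode the latched NPA interrupt registers. In NPA_AF_GEN_INT, bits [15:0]
 * flag per-requestor "free disabled" and bits [31:16] "alloc disabled"
 * conditions; they are extracted with FIELD_GET() below.
 */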
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;
	int err;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
						npa_event_context->npa_af_rvu_gen);
		if (err)
			return err;
		if (intr_val & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
			if (err)
				return err;
		}

		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
			if (err)
				return err;
		}

		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_ERR:
		err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
						npa_event_context->npa_af_rvu_err);
		if (err)
			return err;

		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_RAS:
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
						npa_event_context->npa_af_rvu_ras);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_INTR:
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
						npa_event_context->npa_af_rvu_int);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
			if (err)
				return err;
		}
		return rvu_report_pair_end(fmsg);
	default:
		return -EINVAL;
	}

	return 0;
}

static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
				struct devlink_fmsg *fmsg, void *ctx,
				struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
}

static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_int)
		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
}

static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
}

static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_err)
		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
}

static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_ras)
		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
}

RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);

static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		goto err;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
err:
	rvu_npa_health_reporters_destroy(rvu_dl);
	return -ENOMEM;
}

static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_npa_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create npa reporter, err=%d\n",
			 err);
		return err;
	}
	rvu_npa_register_interrupts(rvu);

	return 0;
}

static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *npa_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	npa_reporters = rvu_dl->rvu_npa_health_reporter;

	if (!npa_reporters->rvu_hw_npa_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);

	rvu_npa_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
	kfree(rvu_dl->rvu_npa_health_reporter);
}

static int rvu_health_reporters_create(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	int err;

	rvu_dl = rvu->rvu_dl;
	err = rvu_npa_health_reporters_create(rvu_dl);
	if (err)
		return err;

	return rvu_nix_health_reporters_create(rvu_dl);
}

static void rvu_health_reporters_destroy(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;

	if (!rvu->rvu_dl)
		return;

	rvu_dl = rvu->rvu_dl;
	rvu_npa_health_reporters_destroy(rvu_dl);
	rvu_nix_health_reporters_destroy(rvu_dl);
}

/* Devlink Params APIs */
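/* The "dwrr_mtu" runtime parameter programs NIX_AF_DWRR_RPM_MTU. Validation
 * accepts a power of two up to 64K or the special values 9728 and 10240,
 * and only while no SMQs are in use (i.e. no active NIX LFs).
 */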
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0, 2, 4, 8, 16, 32, 64 ... 4K, 8K, 32K, 64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
	rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);

	return 0;
}

static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}

enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
};

static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};

/* Devlink switch mode */
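/* Switching the eswitch mode to "switchdev" enables the AF-managed internal
 * switch via rvu_switch_enable(); "legacy" tears it down again.
 */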
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch;

	rswitch = &rvu->rswitch;
	*mode = rswitch->mode;

	return 0;
}

static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch;

	rswitch = &rvu->rswitch;
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		if (rswitch->mode == mode)
			return 0;
		rswitch->mode = mode;
		if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
			rvu_switch_enable(rvu);
		else
			rvu_switch_disable(rvu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	return devlink_info_driver_name_put(req, DRV_NAME);
}

static const struct devlink_ops rvu_devlink_ops = {
	.info_get = rvu_devlink_info_get,
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};

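/* Devlink instance setup: allocate the devlink, create the NPA and NIX
 * health reporters, register the AF parameters and finally publish the
 * instance with devlink_register().
 */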
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params,
				      ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d\n", err);
		goto err_dl_health;
	}

	devlink_register(dl);
	return 0;

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}

void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct devlink *dl = rvu_dl->dl;

	devlink_unregister(dl);
	devlink_params_unregister(dl, rvu_af_dl_params,
				  ARRAY_SIZE(rvu_af_dl_params));
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
}