1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Commit the data collected in @pin_buff into the main debug buffer
 * @dbg_buff, then release the scratch buffer back to the pool.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
72 			      void *dest)
73 {
74 	int vaddr, rc;
75 
76 	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
77 	if (vaddr < 0)
78 		return vaddr;
79 
80 	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
81 	if (rc < 0)
82 		return rc;
83 
84 	return 0;
85 }
86 
87 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
88 			   struct cudbg_buffer *dbg_buff,
89 			   struct cudbg_error *cudbg_err)
90 {
91 	struct adapter *padap = pdbg_init->adap;
92 	struct cudbg_buffer temp_buff = { 0 };
93 	u32 buf_size = 0;
94 	int rc = 0;
95 
96 	if (is_t4(padap->params.chip))
97 		buf_size = T4_REGMAP_SIZE;
98 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
99 		buf_size = T5_REGMAP_SIZE;
100 
101 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
102 	if (rc)
103 		return rc;
104 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
105 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
106 	return rc;
107 }
108 
109 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
110 			    struct cudbg_buffer *dbg_buff,
111 			    struct cudbg_error *cudbg_err)
112 {
113 	struct adapter *padap = pdbg_init->adap;
114 	struct cudbg_buffer temp_buff = { 0 };
115 	struct devlog_params *dparams;
116 	int rc = 0;
117 
118 	rc = t4_init_devlog_params(padap);
119 	if (rc < 0) {
120 		cudbg_err->sys_err = rc;
121 		return rc;
122 	}
123 
124 	dparams = &padap->params.devlog;
125 	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
126 	if (rc)
127 		return rc;
128 
129 	/* Collect FW devlog */
130 	if (dparams->start != 0) {
131 		spin_lock(&padap->win0_lock);
132 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
133 				  dparams->memtype, dparams->start,
134 				  dparams->size,
135 				  (__be32 *)(char *)temp_buff.data,
136 				  1);
137 		spin_unlock(&padap->win0_lock);
138 		if (rc) {
139 			cudbg_err->sys_err = rc;
140 			cudbg_put_buff(&temp_buff, dbg_buff);
141 			return rc;
142 		}
143 	}
144 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
145 	return rc;
146 }
147 
148 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
149 			 struct cudbg_buffer *dbg_buff,
150 			 struct cudbg_error *cudbg_err)
151 {
152 	struct adapter *padap = pdbg_init->adap;
153 	struct cudbg_buffer temp_buff = { 0 };
154 	int size, rc;
155 	u32 cfg = 0;
156 
157 	if (is_t6(padap->params.chip)) {
158 		size = padap->params.cim_la_size / 10 + 1;
159 		size *= 11 * sizeof(u32);
160 	} else {
161 		size = padap->params.cim_la_size / 8;
162 		size *= 8 * sizeof(u32);
163 	}
164 
165 	size += sizeof(cfg);
166 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
167 	if (rc)
168 		return rc;
169 
170 	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
171 	if (rc) {
172 		cudbg_err->sys_err = rc;
173 		cudbg_put_buff(&temp_buff, dbg_buff);
174 		return rc;
175 	}
176 
177 	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
178 	rc = t4_cim_read_la(padap,
179 			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
180 			    NULL);
181 	if (rc < 0) {
182 		cudbg_err->sys_err = rc;
183 		cudbg_put_buff(&temp_buff, dbg_buff);
184 		return rc;
185 	}
186 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
187 	return rc;
188 }
189 
190 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
191 			    struct cudbg_buffer *dbg_buff,
192 			    struct cudbg_error *cudbg_err)
193 {
194 	struct adapter *padap = pdbg_init->adap;
195 	struct cudbg_buffer temp_buff = { 0 };
196 	int size, rc;
197 
198 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
199 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
200 	if (rc)
201 		return rc;
202 
203 	t4_cim_read_ma_la(padap,
204 			  (u32 *)temp_buff.data,
205 			  (u32 *)((char *)temp_buff.data +
206 				  5 * CIM_MALA_SIZE));
207 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
208 	return rc;
209 }
210 
211 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
212 			   struct cudbg_buffer *dbg_buff,
213 			   struct cudbg_error *cudbg_err)
214 {
215 	struct adapter *padap = pdbg_init->adap;
216 	struct cudbg_buffer temp_buff = { 0 };
217 	struct cudbg_cim_qcfg *cim_qcfg_data;
218 	int rc;
219 
220 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
221 			    &temp_buff);
222 	if (rc)
223 		return rc;
224 
225 	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
226 	cim_qcfg_data->chip = padap->params.chip;
227 	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
228 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
229 	if (rc) {
230 		cudbg_err->sys_err = rc;
231 		cudbg_put_buff(&temp_buff, dbg_buff);
232 		return rc;
233 	}
234 
235 	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
236 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
237 			 cim_qcfg_data->obq_wr);
238 	if (rc) {
239 		cudbg_err->sys_err = rc;
240 		cudbg_put_buff(&temp_buff, dbg_buff);
241 		return rc;
242 	}
243 
244 	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
245 			 cim_qcfg_data->thres);
246 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
247 	return rc;
248 }
249 
250 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
251 			      struct cudbg_buffer *dbg_buff,
252 			      struct cudbg_error *cudbg_err, int qid)
253 {
254 	struct adapter *padap = pdbg_init->adap;
255 	struct cudbg_buffer temp_buff = { 0 };
256 	int no_of_read_words, rc = 0;
257 	u32 qsize;
258 
259 	/* collect CIM IBQ */
260 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
261 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
262 	if (rc)
263 		return rc;
264 
265 	/* t4_read_cim_ibq will return no. of read words or error */
266 	no_of_read_words = t4_read_cim_ibq(padap, qid,
267 					   (u32 *)temp_buff.data, qsize);
268 	/* no_of_read_words is less than or equal to 0 means error */
269 	if (no_of_read_words <= 0) {
270 		if (!no_of_read_words)
271 			rc = CUDBG_SYSTEM_ERROR;
272 		else
273 			rc = no_of_read_words;
274 		cudbg_err->sys_err = rc;
275 		cudbg_put_buff(&temp_buff, dbg_buff);
276 		return rc;
277 	}
278 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
279 	return rc;
280 }
281 
/* Dump CIM inbound queue 0 (TP channel 0). */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
288 
/* Dump CIM inbound queue 1 (TP channel 1). */
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
295 
/* Dump CIM inbound queue 2 (ULP). */
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
302 
/* Dump CIM inbound queue 3 (SGE 0). */
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
309 
/* Dump CIM inbound queue 4 (SGE 1). */
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
316 
/* Dump CIM inbound queue 5 (NC-SI). */
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
323 
324 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
325 {
326 	u32 value;
327 
328 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
329 		     QUENUMSELECT_V(qid));
330 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
331 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
332 	return value * sizeof(u32);
333 }
334 
335 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
336 			      struct cudbg_buffer *dbg_buff,
337 			      struct cudbg_error *cudbg_err, int qid)
338 {
339 	struct adapter *padap = pdbg_init->adap;
340 	struct cudbg_buffer temp_buff = { 0 };
341 	int no_of_read_words, rc = 0;
342 	u32 qsize;
343 
344 	/* collect CIM OBQ */
345 	qsize =  cudbg_cim_obq_size(padap, qid);
346 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
347 	if (rc)
348 		return rc;
349 
350 	/* t4_read_cim_obq will return no. of read words or error */
351 	no_of_read_words = t4_read_cim_obq(padap, qid,
352 					   (u32 *)temp_buff.data, qsize);
353 	/* no_of_read_words is less than or equal to 0 means error */
354 	if (no_of_read_words <= 0) {
355 		if (!no_of_read_words)
356 			rc = CUDBG_SYSTEM_ERROR;
357 		else
358 			rc = no_of_read_words;
359 		cudbg_err->sys_err = rc;
360 		cudbg_put_buff(&temp_buff, dbg_buff);
361 		return rc;
362 	}
363 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
364 	return rc;
365 }
366 
/* Dump CIM outbound queue 0 (ULP 0). */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}
373 
/* Dump CIM outbound queue 1 (ULP 1). */
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}
380 
/* Dump CIM outbound queue 2 (ULP 2). */
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}
387 
/* Dump CIM outbound queue 3 (ULP 3). */
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}
394 
/* Dump CIM outbound queue 4 (SGE). */
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}
401 
/* Dump CIM outbound queue 5 (NC-SI). */
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}
408 
/* Dump CIM outbound queue 6 (SGE RX queue 0). */
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}
415 
/* Dump CIM outbound queue 7 (SGE RX queue 1). */
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
422 
423 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
424 			     struct cudbg_buffer *dbg_buff, u8 mem_type,
425 			     unsigned long tot_len,
426 			     struct cudbg_error *cudbg_err)
427 {
428 	unsigned long bytes, bytes_left, bytes_read = 0;
429 	struct adapter *padap = pdbg_init->adap;
430 	struct cudbg_buffer temp_buff = { 0 };
431 	int rc = 0;
432 
433 	bytes_left = tot_len;
434 	while (bytes_left > 0) {
435 		bytes = min_t(unsigned long, bytes_left,
436 			      (unsigned long)CUDBG_CHUNK_SIZE);
437 		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
438 		if (rc)
439 			return rc;
440 		spin_lock(&padap->win0_lock);
441 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
442 				  bytes_read, bytes,
443 				  (__be32 *)temp_buff.data,
444 				  1);
445 		spin_unlock(&padap->win0_lock);
446 		if (rc) {
447 			cudbg_err->sys_err = rc;
448 			cudbg_put_buff(&temp_buff, dbg_buff);
449 			return rc;
450 		}
451 		bytes_left -= bytes;
452 		bytes_read += bytes;
453 		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
454 	}
455 	return rc;
456 }
457 
458 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
459 				   struct card_mem *mem_info)
460 {
461 	struct adapter *padap = pdbg_init->adap;
462 	u32 value;
463 
464 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
465 	value = EDRAM0_SIZE_G(value);
466 	mem_info->size_edc0 = (u16)value;
467 
468 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
469 	value = EDRAM1_SIZE_G(value);
470 	mem_info->size_edc1 = (u16)value;
471 
472 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
473 	if (value & EDRAM0_ENABLE_F)
474 		mem_info->mem_flag |= (1 << EDC0_FLAG);
475 	if (value & EDRAM1_ENABLE_F)
476 		mem_info->mem_flag |= (1 << EDC1_FLAG);
477 }
478 
479 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
480 			     struct cudbg_error *cudbg_err)
481 {
482 	struct adapter *padap = pdbg_init->adap;
483 	int rc;
484 
485 	if (is_fw_attached(pdbg_init)) {
486 		/* Flush uP dcache before reading edcX/mcX  */
487 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
488 		if (rc)
489 			cudbg_err->sys_warn = rc;
490 	}
491 }
492 
493 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
494 				    struct cudbg_buffer *dbg_buff,
495 				    struct cudbg_error *cudbg_err,
496 				    u8 mem_type)
497 {
498 	struct card_mem mem_info = {0};
499 	unsigned long flag, size;
500 	int rc;
501 
502 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
503 	cudbg_collect_mem_info(pdbg_init, &mem_info);
504 	switch (mem_type) {
505 	case MEM_EDC0:
506 		flag = (1 << EDC0_FLAG);
507 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
508 		break;
509 	case MEM_EDC1:
510 		flag = (1 << EDC1_FLAG);
511 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
512 		break;
513 	default:
514 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
515 		goto err;
516 	}
517 
518 	if (mem_info.mem_flag & flag) {
519 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
520 				       size, cudbg_err);
521 		if (rc)
522 			goto err;
523 	} else {
524 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
525 		goto err;
526 	}
527 err:
528 	return rc;
529 }
530 
/* Dump the contents of EDC0 memory. */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}
538 
/* Dump the contents of EDC1 memory. */
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
546 
547 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
548 		      struct cudbg_buffer *dbg_buff,
549 		      struct cudbg_error *cudbg_err)
550 {
551 	struct adapter *padap = pdbg_init->adap;
552 	struct cudbg_buffer temp_buff = { 0 };
553 	int rc;
554 
555 	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
556 	if (rc)
557 		return rc;
558 
559 	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
560 	if (rc) {
561 		cudbg_err->sys_err = rc;
562 		cudbg_put_buff(&temp_buff, dbg_buff);
563 		return rc;
564 	}
565 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
566 	return rc;
567 }
568 
569 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
570 				struct cudbg_buffer *dbg_buff,
571 				struct cudbg_error *cudbg_err)
572 {
573 	struct adapter *padap = pdbg_init->adap;
574 	struct cudbg_buffer temp_buff = { 0 };
575 	struct cudbg_rss_vf_conf *vfconf;
576 	int vf, rc, vf_count;
577 
578 	vf_count = padap->params.arch.vfcount;
579 	rc = cudbg_get_buff(dbg_buff,
580 			    vf_count * sizeof(struct cudbg_rss_vf_conf),
581 			    &temp_buff);
582 	if (rc)
583 		return rc;
584 
585 	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
586 	for (vf = 0; vf < vf_count; vf++)
587 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
588 				      &vfconf[vf].rss_vf_vfh, true);
589 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
590 	return rc;
591 }
592 
593 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
594 			   struct cudbg_buffer *dbg_buff,
595 			   struct cudbg_error *cudbg_err)
596 {
597 	struct adapter *padap = pdbg_init->adap;
598 	struct cudbg_buffer temp_buff = { 0 };
599 	int rc;
600 
601 	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
602 	if (rc)
603 		return rc;
604 
605 	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
606 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
607 	return rc;
608 }
609 
610 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
611 			   struct cudbg_buffer *dbg_buff,
612 			   struct cudbg_error *cudbg_err)
613 {
614 	struct adapter *padap = pdbg_init->adap;
615 	struct cudbg_buffer temp_buff = { 0 };
616 	struct cudbg_pm_stats *pm_stats_buff;
617 	int rc;
618 
619 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
620 			    &temp_buff);
621 	if (rc)
622 		return rc;
623 
624 	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
625 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
626 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
627 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
628 	return rc;
629 }
630 
631 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
632 			   struct cudbg_buffer *dbg_buff,
633 			   struct cudbg_error *cudbg_err)
634 {
635 	struct adapter *padap = pdbg_init->adap;
636 	struct cudbg_buffer temp_buff = { 0 };
637 	struct cudbg_hw_sched *hw_sched_buff;
638 	int i, rc = 0;
639 
640 	if (!padap->params.vpd.cclk)
641 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
642 
643 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
644 			    &temp_buff);
645 	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
646 	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
647 	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
648 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
649 	for (i = 0; i < NTX_SCHED; ++i)
650 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
651 				&hw_sched_buff->ipg[i], true);
652 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
653 	return rc;
654 }
655 
/* Dump the TP indirect registers: the TP_PIO, TP_TM_PIO and
 * TP_MIB_INDEX register groups, one struct ireg_buf per array row.
 * The register arrays differ between T5 and T6, so each loop selects
 * the matching per-chip table.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total byte size of all three register tables for this chip */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	/* Each table row is IREG_NUM_ELEM u32s and yields one ireg_buf */
	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		/* Row layout: addr reg, data reg, local offset, range */
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
769 
770 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
771 			       struct cudbg_buffer *dbg_buff,
772 			       struct cudbg_error *cudbg_err)
773 {
774 	struct adapter *padap = pdbg_init->adap;
775 	struct cudbg_buffer temp_buff = { 0 };
776 	struct ireg_buf *ch_sge_dbg;
777 	int i, rc;
778 
779 	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
780 	if (rc)
781 		return rc;
782 
783 	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
784 	for (i = 0; i < 2; i++) {
785 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
786 		u32 *buff = ch_sge_dbg->outbuf;
787 
788 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
789 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
790 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
791 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
792 		t4_read_indirect(padap,
793 				 sge_pio->ireg_addr,
794 				 sge_pio->ireg_data,
795 				 buff,
796 				 sge_pio->ireg_offset_range,
797 				 sge_pio->ireg_local_offset);
798 		ch_sge_dbg++;
799 	}
800 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
801 	return rc;
802 }
803 
804 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
805 			   struct cudbg_buffer *dbg_buff,
806 			   struct cudbg_error *cudbg_err)
807 {
808 	struct adapter *padap = pdbg_init->adap;
809 	struct cudbg_buffer temp_buff = { 0 };
810 	struct cudbg_ulprx_la *ulprx_la_buff;
811 	int rc;
812 
813 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
814 			    &temp_buff);
815 	if (rc)
816 		return rc;
817 
818 	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
819 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
820 	ulprx_la_buff->size = ULPRX_LA_SIZE;
821 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
822 	return rc;
823 }
824 
825 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
826 			struct cudbg_buffer *dbg_buff,
827 			struct cudbg_error *cudbg_err)
828 {
829 	struct adapter *padap = pdbg_init->adap;
830 	struct cudbg_buffer temp_buff = { 0 };
831 	struct cudbg_tp_la *tp_la_buff;
832 	int size, rc;
833 
834 	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
835 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
836 	if (rc)
837 		return rc;
838 
839 	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
840 	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
841 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
842 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
843 	return rc;
844 }
845 
846 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
847 			     struct cudbg_buffer *dbg_buff,
848 			     struct cudbg_error *cudbg_err)
849 {
850 	struct cudbg_cim_pif_la *cim_pif_la_buff;
851 	struct adapter *padap = pdbg_init->adap;
852 	struct cudbg_buffer temp_buff = { 0 };
853 	int size, rc;
854 
855 	size = sizeof(struct cudbg_cim_pif_la) +
856 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
857 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
858 	if (rc)
859 		return rc;
860 
861 	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
862 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
863 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
864 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
865 			   NULL, NULL);
866 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
867 	return rc;
868 }
869 
/* Dump derived clock/timer information: the core clock period and the
 * various TP timers converted from core-clock ticks into microsecond
 * based values.
 */
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	/* Without a core clock none of the conversions below are possible */
	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	/* One TP timer tick in microseconds (cclk_ps << tre is psec) */
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	/* Remaining timers are register values scaled by the tick length */
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
918 
919 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
920 				struct cudbg_buffer *dbg_buff,
921 				struct cudbg_error *cudbg_err)
922 {
923 	struct adapter *padap = pdbg_init->adap;
924 	struct cudbg_buffer temp_buff = { 0 };
925 	struct ireg_buf *ch_pcie;
926 	int i, rc, n;
927 	u32 size;
928 
929 	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
930 	size = sizeof(struct ireg_buf) * n * 2;
931 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
932 	if (rc)
933 		return rc;
934 
935 	ch_pcie = (struct ireg_buf *)temp_buff.data;
936 	/* PCIE_PDBG */
937 	for (i = 0; i < n; i++) {
938 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
939 		u32 *buff = ch_pcie->outbuf;
940 
941 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
942 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
943 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
944 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
945 		t4_read_indirect(padap,
946 				 pcie_pio->ireg_addr,
947 				 pcie_pio->ireg_data,
948 				 buff,
949 				 pcie_pio->ireg_offset_range,
950 				 pcie_pio->ireg_local_offset);
951 		ch_pcie++;
952 	}
953 
954 	/* PCIE_CDBG */
955 	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
956 	for (i = 0; i < n; i++) {
957 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
958 		u32 *buff = ch_pcie->outbuf;
959 
960 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
961 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
962 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
963 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
964 		t4_read_indirect(padap,
965 				 pcie_pio->ireg_addr,
966 				 pcie_pio->ireg_data,
967 				 buff,
968 				 pcie_pio->ireg_offset_range,
969 				 pcie_pio->ireg_local_offset);
970 		ch_pcie++;
971 	}
972 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
973 	return rc;
974 }
975 
976 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
977 			      struct cudbg_buffer *dbg_buff,
978 			      struct cudbg_error *cudbg_err)
979 {
980 	struct adapter *padap = pdbg_init->adap;
981 	struct cudbg_buffer temp_buff = { 0 };
982 	struct ireg_buf *ch_pm;
983 	int i, rc, n;
984 	u32 size;
985 
986 	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
987 	size = sizeof(struct ireg_buf) * n * 2;
988 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
989 	if (rc)
990 		return rc;
991 
992 	ch_pm = (struct ireg_buf *)temp_buff.data;
993 	/* PM_RX */
994 	for (i = 0; i < n; i++) {
995 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
996 		u32 *buff = ch_pm->outbuf;
997 
998 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
999 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
1000 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
1001 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
1002 		t4_read_indirect(padap,
1003 				 pm_pio->ireg_addr,
1004 				 pm_pio->ireg_data,
1005 				 buff,
1006 				 pm_pio->ireg_offset_range,
1007 				 pm_pio->ireg_local_offset);
1008 		ch_pm++;
1009 	}
1010 
1011 	/* PM_TX */
1012 	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
1013 	for (i = 0; i < n; i++) {
1014 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
1015 		u32 *buff = ch_pm->outbuf;
1016 
1017 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1018 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
1019 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1020 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1021 		t4_read_indirect(padap,
1022 				 pm_pio->ireg_addr,
1023 				 pm_pio->ireg_data,
1024 				 buff,
1025 				 pm_pio->ireg_offset_range,
1026 				 pm_pio->ireg_local_offset);
1027 		ch_pm++;
1028 	}
1029 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1030 	return rc;
1031 }
1032 
/* Collect TID (tunnel/connection/filter/server ID) region information.
 *
 * Fills a cudbg_tid_info_region_rev1 with the boundaries of the various
 * TID ranges, gathered partly from firmware queries, partly from LE
 * registers, and partly from the driver's cached tid_info state.
 */
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	/* Versioned header lets the userspace decoder handle revisions. */
	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

/* Build a PFVF firmware parameter mnemonic for t4_query_params(). */
#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	/* Ask firmware for the ETHOFLD TID range. */
	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc <  0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		/* T6 additionally exposes a high-priority filter range. */
		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	/* Remaining ranges come from the driver's cached tid_info. */
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	/* Snapshot adapter flags and the LE active-connection counters. */
	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1117 
/* Convert a TCAM X/Y pair into an address/mask pair: the mask is the
 * union of the X and Y bits, and the MAC address is the low 48 bits of
 * Y taken in big-endian byte order.
 */
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	/* Skip the top two bytes of the 8-byte big-endian value. */
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}
1124 
1125 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
1126 				   struct fw_ldst_mps_rplc *mps_rplc)
1127 {
1128 	if (is_t5(padap->params.chip)) {
1129 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1130 							  MPS_VF_RPLCT_MAP3_A));
1131 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1132 							  MPS_VF_RPLCT_MAP2_A));
1133 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1134 							  MPS_VF_RPLCT_MAP1_A));
1135 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1136 							  MPS_VF_RPLCT_MAP0_A));
1137 	} else {
1138 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1139 							  MPS_VF_RPLCT_MAP7_A));
1140 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1141 							  MPS_VF_RPLCT_MAP6_A));
1142 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1143 							  MPS_VF_RPLCT_MAP5_A));
1144 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1145 							  MPS_VF_RPLCT_MAP4_A));
1146 	}
1147 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
1148 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
1149 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
1150 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
1151 }
1152 
/* Read one MPS TCAM entry at index @idx into @tcam.
 *
 * T6 and later use the indirect DATA2_CTL interface to fetch the Y and
 * X halves of the entry; older chips expose the TCAM directly through
 * MPS_CLS_TCAM_{Y,X}_L.  An entry with overlapping X and Y bits is
 * treated as empty and skipped.
 */
static int cudbg_collect_tcam_index(struct adapter *padap,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		/* Indices 256 and above live in the second TCAM. */
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	/* The replicate flag moved between chip generations. */
	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		/* Ask firmware for the replication map for this index. */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		/* NOTE(review): when the mailbox read fails, the backdoor
		 * register values are used, but the mailbox error in rc is
		 * still returned to the caller — confirm whether rc should
		 * be cleared after a successful fallback.
		 */
		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				&ldst_cmd);
		if (rc)
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
		else
			mps_rplc = ldst_cmd.u.mps.rplc;

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		/* Larger chips carry 256 replication bits. */
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}
1263 
1264 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1265 			   struct cudbg_buffer *dbg_buff,
1266 			   struct cudbg_error *cudbg_err)
1267 {
1268 	struct adapter *padap = pdbg_init->adap;
1269 	struct cudbg_buffer temp_buff = { 0 };
1270 	u32 size = 0, i, n, total_size = 0;
1271 	struct cudbg_mps_tcam *tcam;
1272 	int rc;
1273 
1274 	n = padap->params.arch.mps_tcam_size;
1275 	size = sizeof(struct cudbg_mps_tcam) * n;
1276 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1277 	if (rc)
1278 		return rc;
1279 
1280 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1281 	for (i = 0; i < n; i++) {
1282 		rc = cudbg_collect_tcam_index(padap, tcam, i);
1283 		if (rc) {
1284 			cudbg_err->sys_err = rc;
1285 			cudbg_put_buff(&temp_buff, dbg_buff);
1286 			return rc;
1287 		}
1288 		total_size += sizeof(struct cudbg_mps_tcam);
1289 		tcam++;
1290 	}
1291 
1292 	if (!total_size) {
1293 		rc = CUDBG_SYSTEM_ERROR;
1294 		cudbg_err->sys_err = rc;
1295 		cudbg_put_buff(&temp_buff, dbg_buff);
1296 		return rc;
1297 	}
1298 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1299 	return rc;
1300 }
1301 
/* Collect VPD data: serial/part numbers, MAC address, and the serial
 * configuration, VPD, and firmware version numbers.
 *
 * The call order matters: the PF's VPD window is temporarily widened to
 * the whole EEPROM to reach the serial-config version, then restored
 * before any error from that read is acted on.
 */
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* Serial Configuration Version is located beyond the PF's vpd size.
	 * Temporarily give access to entire EEPROM to get it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	/* Keep the result in ret so the VPD size is restored first. */
	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	/* VPD version is stored as a decimal string. */
	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	/* Decompose the packed firmware version word. */
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1369 
/* Issue an LE debug-interface (DBGI) read for @tid and copy the
 * response words into @tid_data.
 *
 * Returns 0 on success, or CUDBG_SYSTEM_ERROR if the command stays
 * busy past the retry budget or the response status reports failure.
 */
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	/* NOTE(review): tight busy-wait, at most 8 polls with no delay —
	 * assumes the DBGI command completes almost immediately; confirm
	 * against hardware timing.
	 */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}
1414 
1415 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
1416 {
1417 	int type = LE_ET_UNKNOWN;
1418 
1419 	if (tid < tcam_region.server_start)
1420 		type = LE_ET_TCAM_CON;
1421 	else if (tid < tcam_region.filter_start)
1422 		type = LE_ET_TCAM_SERVER;
1423 	else if (tid < tcam_region.clip_start)
1424 		type = LE_ET_TCAM_FILTER;
1425 	else if (tid < tcam_region.routing_start)
1426 		type = LE_ET_TCAM_CLIP;
1427 	else if (tid < tcam_region.tid_hash_base)
1428 		type = LE_ET_TCAM_ROUTING;
1429 	else if (tid < tcam_region.max_tid)
1430 		type = LE_ET_HASH_CON;
1431 	else
1432 		type = LE_ET_INVALID_TID;
1433 
1434 	return type;
1435 }
1436 
1437 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
1438 			       struct cudbg_tcam tcam_region)
1439 {
1440 	int ipv6 = 0;
1441 	int le_type;
1442 
1443 	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
1444 	if (tid_data->tid & 1)
1445 		return 0;
1446 
1447 	if (le_type == LE_ET_HASH_CON) {
1448 		ipv6 = tid_data->data[16] & 0x8000;
1449 	} else if (le_type == LE_ET_TCAM_CON) {
1450 		ipv6 = tid_data->data[16] & 0x8000;
1451 		if (ipv6)
1452 			ipv6 = tid_data->data[9] == 0x00C00000;
1453 	} else {
1454 		ipv6 = 0;
1455 	}
1456 	return ipv6;
1457 }
1458 
1459 void cudbg_fill_le_tcam_info(struct adapter *padap,
1460 			     struct cudbg_tcam *tcam_region)
1461 {
1462 	u32 value;
1463 
1464 	/* Get the LE regions */
1465 	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
1466 	tcam_region->tid_hash_base = value;
1467 
1468 	/* Get routing table index */
1469 	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
1470 	tcam_region->routing_start = value;
1471 
1472 	/*Get clip table index */
1473 	value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
1474 	tcam_region->clip_start = value;
1475 
1476 	/* Get filter table index */
1477 	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
1478 	tcam_region->filter_start = value;
1479 
1480 	/* Get server table index */
1481 	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
1482 	tcam_region->server_start = value;
1483 
1484 	/* Check whether hash is enabled and calculate the max tids */
1485 	value = t4_read_reg(padap, LE_DB_CONFIG_A);
1486 	if ((value >> HASHEN_S) & 1) {
1487 		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
1488 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1489 			tcam_region->max_tid = (value & 0xFFFFF) +
1490 					       tcam_region->tid_hash_base;
1491 		} else {
1492 			value = HASHTIDSIZE_G(value);
1493 			value = 1 << value;
1494 			tcam_region->max_tid = value +
1495 					       tcam_region->tid_hash_base;
1496 		}
1497 	} else { /* hash not enabled */
1498 		tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
1499 	}
1500 }
1501 
1502 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
1503 			  struct cudbg_buffer *dbg_buff,
1504 			  struct cudbg_error *cudbg_err)
1505 {
1506 	struct adapter *padap = pdbg_init->adap;
1507 	struct cudbg_buffer temp_buff = { 0 };
1508 	struct cudbg_tcam tcam_region = { 0 };
1509 	struct cudbg_tid_data *tid_data;
1510 	u32 bytes = 0;
1511 	int rc, size;
1512 	u32 i;
1513 
1514 	cudbg_fill_le_tcam_info(padap, &tcam_region);
1515 
1516 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
1517 	size += sizeof(struct cudbg_tcam);
1518 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1519 	if (rc)
1520 		return rc;
1521 
1522 	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
1523 	bytes = sizeof(struct cudbg_tcam);
1524 	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
1525 	/* read all tid */
1526 	for (i = 0; i < tcam_region.max_tid; ) {
1527 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
1528 		if (rc) {
1529 			cudbg_err->sys_err = rc;
1530 			cudbg_put_buff(&temp_buff, dbg_buff);
1531 			return rc;
1532 		}
1533 
1534 		/* ipv6 takes two tids */
1535 		cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
1536 
1537 		tid_data++;
1538 		bytes += sizeof(struct cudbg_tid_data);
1539 	}
1540 
1541 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1542 	return rc;
1543 }
1544 
1545 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1546 			struct cudbg_buffer *dbg_buff,
1547 			struct cudbg_error *cudbg_err)
1548 {
1549 	struct adapter *padap = pdbg_init->adap;
1550 	struct cudbg_buffer temp_buff = { 0 };
1551 	u32 size;
1552 	int rc;
1553 
1554 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1555 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1556 	if (rc)
1557 		return rc;
1558 
1559 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
1560 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1561 	return rc;
1562 }
1563 
1564 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1565 			      struct cudbg_buffer *dbg_buff,
1566 			      struct cudbg_error *cudbg_err)
1567 {
1568 	struct adapter *padap = pdbg_init->adap;
1569 	struct cudbg_buffer temp_buff = { 0 };
1570 	struct ireg_buf *ma_indr;
1571 	int i, rc, n;
1572 	u32 size, j;
1573 
1574 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1575 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1576 
1577 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1578 	size = sizeof(struct ireg_buf) * n * 2;
1579 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1580 	if (rc)
1581 		return rc;
1582 
1583 	ma_indr = (struct ireg_buf *)temp_buff.data;
1584 	for (i = 0; i < n; i++) {
1585 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1586 		u32 *buff = ma_indr->outbuf;
1587 
1588 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1589 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1590 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1591 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1592 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1593 				 buff, ma_fli->ireg_offset_range,
1594 				 ma_fli->ireg_local_offset);
1595 		ma_indr++;
1596 	}
1597 
1598 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1599 	for (i = 0; i < n; i++) {
1600 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1601 		u32 *buff = ma_indr->outbuf;
1602 
1603 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1604 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1605 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1606 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1607 			t4_read_indirect(padap, ma_fli->ireg_addr,
1608 					 ma_fli->ireg_data, buff, 1,
1609 					 ma_fli->ireg_local_offset);
1610 			buff++;
1611 			ma_fli->ireg_local_offset += 0x20;
1612 		}
1613 		ma_indr++;
1614 	}
1615 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1616 	return rc;
1617 }
1618 
/* Capture the ULP TX logic analyzer state for each of the
 * CUDBG_NUM_ULPTX channels: read/write pointers, the current read-data
 * word, and a burst of CUDBG_NUM_ULPTX_READ samples.
 */
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	/* Each channel's register block is 0x10 bytes apart. */
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		/* NOTE(review): the same RDDATA register is read
		 * repeatedly — presumably each read pops the next LA
		 * word from hardware; confirm against the register spec.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1653 
/* Dump the UP CIM indirect registers.
 *
 * The register table differs between T5 and T6; each row supplies the
 * local offset and range passed to t4_cim_read().  The row count is
 * taken from the T5 table (assumed equal to the T6 table's — TODO
 * confirm).
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		/* NOTE(review): on a chip that is neither T5 nor T6 the
		 * ireg fields below are never written and t4_cim_read()
		 * would see whatever the buffer held — presumably this
		 * path only runs on T5/T6 adapters; confirm.
		 */
		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1702 
1703 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
1704 			     struct cudbg_buffer *dbg_buff,
1705 			     struct cudbg_error *cudbg_err)
1706 {
1707 	struct adapter *padap = pdbg_init->adap;
1708 	struct cudbg_buffer temp_buff = { 0 };
1709 	struct cudbg_pbt_tables *pbt;
1710 	int i, rc;
1711 	u32 addr;
1712 
1713 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
1714 			    &temp_buff);
1715 	if (rc)
1716 		return rc;
1717 
1718 	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
1719 	/* PBT dynamic entries */
1720 	addr = CUDBG_CHAC_PBT_ADDR;
1721 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
1722 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1723 				 &pbt->pbt_dynamic[i]);
1724 		if (rc) {
1725 			cudbg_err->sys_err = rc;
1726 			cudbg_put_buff(&temp_buff, dbg_buff);
1727 			return rc;
1728 		}
1729 	}
1730 
1731 	/* PBT static entries */
1732 	/* static entries start when bit 6 is set */
1733 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
1734 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
1735 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1736 				 &pbt->pbt_static[i]);
1737 		if (rc) {
1738 			cudbg_err->sys_err = rc;
1739 			cudbg_put_buff(&temp_buff, dbg_buff);
1740 			return rc;
1741 		}
1742 	}
1743 
1744 	/* LRF entries */
1745 	addr = CUDBG_CHAC_PBT_LRF;
1746 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
1747 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1748 				 &pbt->lrf_table[i]);
1749 		if (rc) {
1750 			cudbg_err->sys_err = rc;
1751 			cudbg_put_buff(&temp_buff, dbg_buff);
1752 			return rc;
1753 		}
1754 	}
1755 
1756 	/* PBT data entries */
1757 	addr = CUDBG_CHAC_PBT_DATA;
1758 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
1759 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1760 				 &pbt->pbt_data[i]);
1761 		if (rc) {
1762 			cudbg_err->sys_err = rc;
1763 			cudbg_put_buff(&temp_buff, dbg_buff);
1764 			return rc;
1765 		}
1766 	}
1767 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1768 	return rc;
1769 }
1770 
1771 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1772 			   struct cudbg_buffer *dbg_buff,
1773 			   struct cudbg_error *cudbg_err)
1774 {
1775 	struct adapter *padap = pdbg_init->adap;
1776 	struct cudbg_mbox_log *mboxlog = NULL;
1777 	struct cudbg_buffer temp_buff = { 0 };
1778 	struct mbox_cmd_log *log = NULL;
1779 	struct mbox_cmd *entry;
1780 	unsigned int entry_idx;
1781 	u16 mbox_cmds;
1782 	int i, k, rc;
1783 	u64 flit;
1784 	u32 size;
1785 
1786 	log = padap->mbox_log;
1787 	mbox_cmds = padap->mbox_log->size;
1788 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1789 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1790 	if (rc)
1791 		return rc;
1792 
1793 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1794 	for (k = 0; k < mbox_cmds; k++) {
1795 		entry_idx = log->cursor + k;
1796 		if (entry_idx >= log->size)
1797 			entry_idx -= log->size;
1798 
1799 		entry = mbox_cmd_log_entry(log, entry_idx);
1800 		/* skip over unused entries */
1801 		if (entry->timestamp == 0)
1802 			continue;
1803 
1804 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1805 		for (i = 0; i < MBOX_LEN / 8; i++) {
1806 			flit = entry->cmd[i];
1807 			mboxlog->hi[i] = (u32)(flit >> 32);
1808 			mboxlog->lo[i] = (u32)flit;
1809 		}
1810 		mboxlog++;
1811 	}
1812 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1813 	return rc;
1814 }
1815 
1816 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1817 			       struct cudbg_buffer *dbg_buff,
1818 			       struct cudbg_error *cudbg_err)
1819 {
1820 	struct adapter *padap = pdbg_init->adap;
1821 	struct cudbg_buffer temp_buff = { 0 };
1822 	struct ireg_buf *hma_indr;
1823 	int i, rc, n;
1824 	u32 size;
1825 
1826 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1827 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1828 
1829 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1830 	size = sizeof(struct ireg_buf) * n;
1831 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1832 	if (rc)
1833 		return rc;
1834 
1835 	hma_indr = (struct ireg_buf *)temp_buff.data;
1836 	for (i = 0; i < n; i++) {
1837 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
1838 		u32 *buff = hma_indr->outbuf;
1839 
1840 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1841 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1842 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1843 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1844 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1845 				 buff, hma_fli->ireg_offset_range,
1846 				 hma_fli->ireg_local_offset);
1847 		hma_indr++;
1848 	}
1849 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1850 	return rc;
1851 }
1852