1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Commit the data collected in @pin_buff to the main debug buffer
 * @dbg_buff and release the scratch buffer obtained via cudbg_get_buff().
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
72 			      void *dest)
73 {
74 	int vaddr, rc;
75 
76 	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
77 	if (vaddr < 0)
78 		return vaddr;
79 
80 	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
81 	if (rc < 0)
82 		return rc;
83 
84 	return 0;
85 }
86 
87 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
88 			   struct cudbg_buffer *dbg_buff,
89 			   struct cudbg_error *cudbg_err)
90 {
91 	struct adapter *padap = pdbg_init->adap;
92 	struct cudbg_buffer temp_buff = { 0 };
93 	u32 buf_size = 0;
94 	int rc = 0;
95 
96 	if (is_t4(padap->params.chip))
97 		buf_size = T4_REGMAP_SIZE;
98 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
99 		buf_size = T5_REGMAP_SIZE;
100 
101 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
102 	if (rc)
103 		return rc;
104 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
105 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
106 	return rc;
107 }
108 
/* Dump the firmware device log.
 *
 * Devlog parameters are refreshed from hardware first; if the devlog
 * start address is zero, an empty buffer of dparams->size is still
 * committed.  Hardware access errors are recorded in
 * cudbg_err->sys_err and returned.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		/* Memory window access must be serialized against other
		 * users of window 0.
		 */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
147 
/* Dump the CIM logic analyzer along with its current configuration.
 *
 * The buffer layout is the LA config word followed by the LA data.
 * The sizing mirrors what t4_cim_read_la() produces: on T6 the LA is
 * read in groups of 11 words per 10 entries, otherwise 8 words per 8
 * entries -- NOTE(review): derived from the arithmetic here; confirm
 * against t4_cim_read_la().
 */
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	/* One extra word up front for the LA config register value. */
	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
189 
190 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
191 			    struct cudbg_buffer *dbg_buff,
192 			    struct cudbg_error *cudbg_err)
193 {
194 	struct adapter *padap = pdbg_init->adap;
195 	struct cudbg_buffer temp_buff = { 0 };
196 	int size, rc;
197 
198 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
199 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
200 	if (rc)
201 		return rc;
202 
203 	t4_cim_read_ma_la(padap,
204 			  (u32 *)temp_buff.data,
205 			  (u32 *)((char *)temp_buff.data +
206 				  5 * CIM_MALA_SIZE));
207 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
208 	return rc;
209 }
210 
/* Dump the CIM queue configuration: IBQ/OBQ pointer state, base
 * addresses, sizes and thresholds, tagged with the chip revision.
 */
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	/* IBQ read/write pointer state */
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	/* OBQ write pointers */
	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
249 
250 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
251 			      struct cudbg_buffer *dbg_buff,
252 			      struct cudbg_error *cudbg_err, int qid)
253 {
254 	struct adapter *padap = pdbg_init->adap;
255 	struct cudbg_buffer temp_buff = { 0 };
256 	int no_of_read_words, rc = 0;
257 	u32 qsize;
258 
259 	/* collect CIM IBQ */
260 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
261 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
262 	if (rc)
263 		return rc;
264 
265 	/* t4_read_cim_ibq will return no. of read words or error */
266 	no_of_read_words = t4_read_cim_ibq(padap, qid,
267 					   (u32 *)temp_buff.data, qsize);
268 	/* no_of_read_words is less than or equal to 0 means error */
269 	if (no_of_read_words <= 0) {
270 		if (!no_of_read_words)
271 			rc = CUDBG_SYSTEM_ERROR;
272 		else
273 			rc = no_of_read_words;
274 		cudbg_err->sys_err = rc;
275 		cudbg_put_buff(&temp_buff, dbg_buff);
276 		return rc;
277 	}
278 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
279 	return rc;
280 }
281 
/* Dump CIM inbound queue 0 (TP channel 0, per the function name). */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
288 
/* Dump CIM inbound queue 1 (TP channel 1, per the function name). */
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
295 
/* Dump CIM inbound queue 2 (ULP, per the function name). */
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
302 
/* Dump CIM inbound queue 3 (SGE0, per the function name). */
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
309 
/* Dump CIM inbound queue 4 (SGE1, per the function name). */
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
316 
/* Dump CIM inbound queue 5 (NC-SI, per the function name). */
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
323 
324 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
325 {
326 	u32 value;
327 
328 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
329 		     QUENUMSELECT_V(qid));
330 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
331 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
332 	return value * sizeof(u32);
333 }
334 
335 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
336 			      struct cudbg_buffer *dbg_buff,
337 			      struct cudbg_error *cudbg_err, int qid)
338 {
339 	struct adapter *padap = pdbg_init->adap;
340 	struct cudbg_buffer temp_buff = { 0 };
341 	int no_of_read_words, rc = 0;
342 	u32 qsize;
343 
344 	/* collect CIM OBQ */
345 	qsize =  cudbg_cim_obq_size(padap, qid);
346 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
347 	if (rc)
348 		return rc;
349 
350 	/* t4_read_cim_obq will return no. of read words or error */
351 	no_of_read_words = t4_read_cim_obq(padap, qid,
352 					   (u32 *)temp_buff.data, qsize);
353 	/* no_of_read_words is less than or equal to 0 means error */
354 	if (no_of_read_words <= 0) {
355 		if (!no_of_read_words)
356 			rc = CUDBG_SYSTEM_ERROR;
357 		else
358 			rc = no_of_read_words;
359 		cudbg_err->sys_err = rc;
360 		cudbg_put_buff(&temp_buff, dbg_buff);
361 		return rc;
362 	}
363 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
364 	return rc;
365 }
366 
/* Dump CIM outbound queue 0 (ULP0, per the function name). */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}
373 
/* Dump CIM outbound queue 1 (ULP1, per the function name). */
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}
380 
/* Dump CIM outbound queue 2 (ULP2, per the function name). */
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}
387 
/* Dump CIM outbound queue 3 (ULP3, per the function name). */
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}
394 
/* Dump CIM outbound queue 4 (SGE, per the function name). */
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}
401 
/* Dump CIM outbound queue 5 (NC-SI, per the function name). */
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}
408 
/* Dump CIM outbound queue 6 (SGE RX queue 0, per the function name). */
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}
415 
/* Dump CIM outbound queue 7 (SGE RX queue 1, per the function name). */
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
422 
/* Read @tot_len bytes of adapter memory @mem_type into the debug
 * buffer, in chunks of at most CUDBG_CHUNK_SIZE.  Each chunk gets its
 * own scratch buffer which is committed (or released on failure)
 * before the next iteration.  Memory window 0 is held locked for the
 * duration of each hardware read.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
457 
458 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
459 				   struct card_mem *mem_info)
460 {
461 	struct adapter *padap = pdbg_init->adap;
462 	u32 value;
463 
464 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
465 	value = EDRAM0_SIZE_G(value);
466 	mem_info->size_edc0 = (u16)value;
467 
468 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
469 	value = EDRAM1_SIZE_G(value);
470 	mem_info->size_edc1 = (u16)value;
471 
472 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
473 	if (value & EDRAM0_ENABLE_F)
474 		mem_info->mem_flag |= (1 << EDC0_FLAG);
475 	if (value & EDRAM1_ENABLE_F)
476 		mem_info->mem_flag |= (1 << EDC1_FLAG);
477 }
478 
479 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
480 			     struct cudbg_error *cudbg_err)
481 {
482 	struct adapter *padap = pdbg_init->adap;
483 	int rc;
484 
485 	if (is_fw_attached(pdbg_init)) {
486 		/* Flush uP dcache before reading edcX/mcX  */
487 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
488 		if (rc)
489 			cudbg_err->sys_warn = rc;
490 	}
491 }
492 
493 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
494 				    struct cudbg_buffer *dbg_buff,
495 				    struct cudbg_error *cudbg_err,
496 				    u8 mem_type)
497 {
498 	struct card_mem mem_info = {0};
499 	unsigned long flag, size;
500 	int rc;
501 
502 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
503 	cudbg_collect_mem_info(pdbg_init, &mem_info);
504 	switch (mem_type) {
505 	case MEM_EDC0:
506 		flag = (1 << EDC0_FLAG);
507 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
508 		break;
509 	case MEM_EDC1:
510 		flag = (1 << EDC1_FLAG);
511 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
512 		break;
513 	default:
514 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
515 		goto err;
516 	}
517 
518 	if (mem_info.mem_flag & flag) {
519 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
520 				       size, cudbg_err);
521 		if (rc)
522 			goto err;
523 	} else {
524 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
525 		goto err;
526 	}
527 err:
528 	return rc;
529 }
530 
/* Dump the contents of the EDC0 memory region. */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}
538 
/* Dump the contents of the EDC1 memory region. */
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
546 
/* Dump the RSS table (RSS_NENTRIES 16-bit entries). */
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
568 
/* Dump the per-VF RSS configuration (VFL/VFH register pair) for every
 * VF supported by this chip architecture.
 */
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
592 
/* Dump the path MTU table (NMTUS 16-bit entries). */
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
609 
/* Dump PM TX and RX statistics (counts and cycle counters). */
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
630 
631 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
632 			   struct cudbg_buffer *dbg_buff,
633 			   struct cudbg_error *cudbg_err)
634 {
635 	struct adapter *padap = pdbg_init->adap;
636 	struct cudbg_buffer temp_buff = { 0 };
637 	struct cudbg_hw_sched *hw_sched_buff;
638 	int i, rc = 0;
639 
640 	if (!padap->params.vpd.cclk)
641 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
642 
643 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
644 			    &temp_buff);
645 	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
646 	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
647 	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
648 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
649 	for (i = 0; i < NTX_SCHED; ++i)
650 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
651 				&hw_sched_buff->ipg[i], true);
652 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
653 	return rc;
654 }
655 
/* Dump TP indirect registers: the TP_PIO, TP_TM_PIO and TP_MIB_INDEX
 * ranges, using the T5 or T6 address tables as appropriate.  Each
 * table row (IREG_NUM_ELEM u32s: addr, data, local offset, range) is
 * emitted as one struct ireg_buf -- the row itself followed by the
 * register values read from that range.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total number of table rows across all three register groups. */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
769 
/* Dump the two SGE debug indirect register ranges described by
 * t5_sge_dbg_index_array, one struct ireg_buf per range.
 */
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		/* Record the table row, then the register values read
		 * through the addr/data indirect pair.
		 */
		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
803 
/* Dump the ULP-RX logic analyzer (ULPRX_LA_SIZE entries). */
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
824 
/* Dump the TP logic analyzer (TPLA_SIZE 64-bit entries) along with its
 * current capture mode.
 */
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
845 
/* Dump the CIM PIF logic analyzer.  The data area holds two regions of
 * 6 * CIM_PIFLA_SIZE words each (written by t4_cim_read_pif_la()).
 */
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
869 
/* Dump core-clock derived TP timer values: each hardware timer register
 * is converted using the TP timer resolution and the core clock period.
 * Fails with CUDBG_STATUS_CCLK_NOT_DEFINED if the core clock is unknown.
 */
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	/* Core clock period in ps; assumes vpd.cclk is in kHz -- TODO
	 * confirm the unit against the VPD parsing code.
	 */
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	/* One TP timer tick in microseconds. */
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	/* Remaining timers are raw register values scaled to microseconds. */
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
918 
/* Dump PCIe indirect registers: the PCIE_PDBG and PCIE_CDBG ranges,
 * one struct ireg_buf per table row.  The buffer is sized as n * 2 on
 * the assumption that both tables have the same number of rows -- TODO
 * confirm t5_pcie_pdbg_array and t5_pcie_cdbg_array are equally sized.
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
975 
/* Dump PM indirect registers: the PM_RX and PM_TX ranges, one
 * struct ireg_buf per table row.  The buffer is sized as n * 2 on the
 * assumption that both tables have the same number of rows -- TODO
 * confirm t5_pm_rx_array and t5_pm_tx_array are equally sized.
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1032 
/* Collect TID (connection identifier) allocation information into the
 * debug buffer as a versioned cudbg_tid_info_region_rev1 record.  Most
 * fields come from the driver's cached tid_info; the ETHOFLD (and on T6
 * the HPFILTER) ranges are queried from firmware via mailbox.
 * Returns 0 on success or a negative mailbox/allocation error.
 */
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	/* Fill in the revision header so the parser can distinguish this
	 * layout from older TID region dumps.
	 */
	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

/* Build a PFVF firmware parameter identifier for mailbox queries. */
#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	/* Ask firmware for the ETHOFLD TID range (start/end inclusive). */
	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc <  0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	/* Server TID base is read from different LE DB registers per chip. */
	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		/* T6 only: query the high-priority filter TID range. */
		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	/* Remaining counts/bases come from the driver's cached tid_info. */
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	/* Snapshot adapter flags and live LE DB usage counters. */
	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1117 
1118 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1119 {
1120 	*mask = x | y;
1121 	y = (__force u64)cpu_to_be64(y);
1122 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
1123 }
1124 
1125 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
1126 				   struct fw_ldst_mps_rplc *mps_rplc)
1127 {
1128 	if (is_t5(padap->params.chip)) {
1129 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1130 							  MPS_VF_RPLCT_MAP3_A));
1131 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1132 							  MPS_VF_RPLCT_MAP2_A));
1133 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1134 							  MPS_VF_RPLCT_MAP1_A));
1135 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1136 							  MPS_VF_RPLCT_MAP0_A));
1137 	} else {
1138 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1139 							  MPS_VF_RPLCT_MAP7_A));
1140 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1141 							  MPS_VF_RPLCT_MAP6_A));
1142 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1143 							  MPS_VF_RPLCT_MAP5_A));
1144 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1145 							  MPS_VF_RPLCT_MAP4_A));
1146 	}
1147 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
1148 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
1149 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
1150 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
1151 }
1152 
/* Read one MPS TCAM entry at @idx into @tcam.  On T6+ the entry is read
 * through the DATA2_CTL indirection (two reads: Y bits then X bits); on
 * older chips the TCAM is directly addressable.  Empty entries (where
 * tcamx & tcamy is non-zero) are left mostly unfilled and return 0.
 * NOTE(review): tcam->repli is read before being written on chips that
 * are neither T5 nor T6 — assumes *tcam was zero-initialized by the
 * caller's buffer; confirm.
 */
static int cudbg_collect_tcam_index(struct adapter *padap,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		/* Indices 256 and up live in the second TCAM bank. */
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		/* Writing the control register latches the selected entry
		 * into the RDATA registers read below.
		 */
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			/* Outer header lookup: same bits hold VLAN info. */
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		/* Pre-T6: TCAM X/Y halves are directly readable. */
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	/* The replicate flag moved between chip generations. */
	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		/* Ask firmware for the replication map; fall back to a
		 * direct register read if the mailbox command fails.
		 */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				&ldst_cmd);
		if (rc)
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
		else
			mps_rplc = ldst_cmd.u.mps.rplc;

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		/* Larger chips carry 256 replication bits instead of 128. */
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}
1263 
1264 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1265 			   struct cudbg_buffer *dbg_buff,
1266 			   struct cudbg_error *cudbg_err)
1267 {
1268 	struct adapter *padap = pdbg_init->adap;
1269 	struct cudbg_buffer temp_buff = { 0 };
1270 	u32 size = 0, i, n, total_size = 0;
1271 	struct cudbg_mps_tcam *tcam;
1272 	int rc;
1273 
1274 	n = padap->params.arch.mps_tcam_size;
1275 	size = sizeof(struct cudbg_mps_tcam) * n;
1276 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1277 	if (rc)
1278 		return rc;
1279 
1280 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1281 	for (i = 0; i < n; i++) {
1282 		rc = cudbg_collect_tcam_index(padap, tcam, i);
1283 		if (rc) {
1284 			cudbg_err->sys_err = rc;
1285 			cudbg_put_buff(&temp_buff, dbg_buff);
1286 			return rc;
1287 		}
1288 		total_size += sizeof(struct cudbg_mps_tcam);
1289 		tcam++;
1290 	}
1291 
1292 	if (!total_size) {
1293 		rc = CUDBG_SYSTEM_ERROR;
1294 		cudbg_err->sys_err = rc;
1295 		cudbg_put_buff(&temp_buff, dbg_buff);
1296 		return rc;
1297 	}
1298 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1299 	return rc;
1300 }
1301 
/* Collect VPD (Vital Product Data) and version information into the
 * debug buffer: serial/part numbers, MAC, VPD/serial-config versions,
 * and the running firmware version.  Returns 0 on success or a
 * negative error.
 */
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* Serial Configuration Version is located beyond the PF's vpd size.
	 * Temporarily give access to entire EEPROM to get it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	/* Save the read result in 'ret' but do not bail yet: the VPD
	 * window must be restored below even if this read failed.
	 */
	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	/* Now report the deferred read error, if any. */
	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	/* The VPD version is stored as an ASCII number string. */
	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	/* Split the packed firmware version into its four components. */
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1369 
1370 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1371 			struct cudbg_buffer *dbg_buff,
1372 			struct cudbg_error *cudbg_err)
1373 {
1374 	struct adapter *padap = pdbg_init->adap;
1375 	struct cudbg_buffer temp_buff = { 0 };
1376 	u32 size;
1377 	int rc;
1378 
1379 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1380 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1381 	if (rc)
1382 		return rc;
1383 
1384 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
1385 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1386 	return rc;
1387 }
1388 
1389 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1390 			      struct cudbg_buffer *dbg_buff,
1391 			      struct cudbg_error *cudbg_err)
1392 {
1393 	struct adapter *padap = pdbg_init->adap;
1394 	struct cudbg_buffer temp_buff = { 0 };
1395 	struct ireg_buf *ma_indr;
1396 	int i, rc, n;
1397 	u32 size, j;
1398 
1399 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1400 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1401 
1402 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1403 	size = sizeof(struct ireg_buf) * n * 2;
1404 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1405 	if (rc)
1406 		return rc;
1407 
1408 	ma_indr = (struct ireg_buf *)temp_buff.data;
1409 	for (i = 0; i < n; i++) {
1410 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1411 		u32 *buff = ma_indr->outbuf;
1412 
1413 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1414 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1415 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1416 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1417 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1418 				 buff, ma_fli->ireg_offset_range,
1419 				 ma_fli->ireg_local_offset);
1420 		ma_indr++;
1421 	}
1422 
1423 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1424 	for (i = 0; i < n; i++) {
1425 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1426 		u32 *buff = ma_indr->outbuf;
1427 
1428 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1429 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1430 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1431 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1432 			t4_read_indirect(padap, ma_fli->ireg_addr,
1433 					 ma_fli->ireg_data, buff, 1,
1434 					 ma_fli->ireg_local_offset);
1435 			buff++;
1436 			ma_fli->ireg_local_offset += 0x20;
1437 		}
1438 		ma_indr++;
1439 	}
1440 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1441 	return rc;
1442 }
1443 
1444 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
1445 			   struct cudbg_buffer *dbg_buff,
1446 			   struct cudbg_error *cudbg_err)
1447 {
1448 	struct adapter *padap = pdbg_init->adap;
1449 	struct cudbg_buffer temp_buff = { 0 };
1450 	struct cudbg_ulptx_la *ulptx_la_buff;
1451 	u32 i, j;
1452 	int rc;
1453 
1454 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
1455 			    &temp_buff);
1456 	if (rc)
1457 		return rc;
1458 
1459 	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
1460 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
1461 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
1462 						      ULP_TX_LA_RDPTR_0_A +
1463 						      0x10 * i);
1464 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
1465 						      ULP_TX_LA_WRPTR_0_A +
1466 						      0x10 * i);
1467 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
1468 						       ULP_TX_LA_RDDATA_0_A +
1469 						       0x10 * i);
1470 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
1471 			ulptx_la_buff->rd_data[i][j] =
1472 				t4_read_reg(padap,
1473 					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
1474 	}
1475 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1476 	return rc;
1477 }
1478 
1479 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
1480 				  struct cudbg_buffer *dbg_buff,
1481 				  struct cudbg_error *cudbg_err)
1482 {
1483 	struct adapter *padap = pdbg_init->adap;
1484 	struct cudbg_buffer temp_buff = { 0 };
1485 	struct ireg_buf *up_cim;
1486 	int i, rc, n;
1487 	u32 size;
1488 
1489 	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
1490 	size = sizeof(struct ireg_buf) * n;
1491 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1492 	if (rc)
1493 		return rc;
1494 
1495 	up_cim = (struct ireg_buf *)temp_buff.data;
1496 	for (i = 0; i < n; i++) {
1497 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
1498 		u32 *buff = up_cim->outbuf;
1499 
1500 		if (is_t5(padap->params.chip)) {
1501 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
1502 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
1503 			up_cim_reg->ireg_local_offset =
1504 						t5_up_cim_reg_array[i][2];
1505 			up_cim_reg->ireg_offset_range =
1506 						t5_up_cim_reg_array[i][3];
1507 		} else if (is_t6(padap->params.chip)) {
1508 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
1509 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
1510 			up_cim_reg->ireg_local_offset =
1511 						t6_up_cim_reg_array[i][2];
1512 			up_cim_reg->ireg_offset_range =
1513 						t6_up_cim_reg_array[i][3];
1514 		}
1515 
1516 		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
1517 				 up_cim_reg->ireg_offset_range, buff);
1518 		if (rc) {
1519 			cudbg_put_buff(&temp_buff, dbg_buff);
1520 			return rc;
1521 		}
1522 		up_cim++;
1523 	}
1524 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1525 	return rc;
1526 }
1527 
1528 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
1529 			     struct cudbg_buffer *dbg_buff,
1530 			     struct cudbg_error *cudbg_err)
1531 {
1532 	struct adapter *padap = pdbg_init->adap;
1533 	struct cudbg_buffer temp_buff = { 0 };
1534 	struct cudbg_pbt_tables *pbt;
1535 	int i, rc;
1536 	u32 addr;
1537 
1538 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
1539 			    &temp_buff);
1540 	if (rc)
1541 		return rc;
1542 
1543 	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
1544 	/* PBT dynamic entries */
1545 	addr = CUDBG_CHAC_PBT_ADDR;
1546 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
1547 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1548 				 &pbt->pbt_dynamic[i]);
1549 		if (rc) {
1550 			cudbg_err->sys_err = rc;
1551 			cudbg_put_buff(&temp_buff, dbg_buff);
1552 			return rc;
1553 		}
1554 	}
1555 
1556 	/* PBT static entries */
1557 	/* static entries start when bit 6 is set */
1558 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
1559 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
1560 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1561 				 &pbt->pbt_static[i]);
1562 		if (rc) {
1563 			cudbg_err->sys_err = rc;
1564 			cudbg_put_buff(&temp_buff, dbg_buff);
1565 			return rc;
1566 		}
1567 	}
1568 
1569 	/* LRF entries */
1570 	addr = CUDBG_CHAC_PBT_LRF;
1571 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
1572 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1573 				 &pbt->lrf_table[i]);
1574 		if (rc) {
1575 			cudbg_err->sys_err = rc;
1576 			cudbg_put_buff(&temp_buff, dbg_buff);
1577 			return rc;
1578 		}
1579 	}
1580 
1581 	/* PBT data entries */
1582 	addr = CUDBG_CHAC_PBT_DATA;
1583 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
1584 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1585 				 &pbt->pbt_data[i]);
1586 		if (rc) {
1587 			cudbg_err->sys_err = rc;
1588 			cudbg_put_buff(&temp_buff, dbg_buff);
1589 			return rc;
1590 		}
1591 	}
1592 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1593 	return rc;
1594 }
1595 
1596 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1597 			   struct cudbg_buffer *dbg_buff,
1598 			   struct cudbg_error *cudbg_err)
1599 {
1600 	struct adapter *padap = pdbg_init->adap;
1601 	struct cudbg_mbox_log *mboxlog = NULL;
1602 	struct cudbg_buffer temp_buff = { 0 };
1603 	struct mbox_cmd_log *log = NULL;
1604 	struct mbox_cmd *entry;
1605 	unsigned int entry_idx;
1606 	u16 mbox_cmds;
1607 	int i, k, rc;
1608 	u64 flit;
1609 	u32 size;
1610 
1611 	log = padap->mbox_log;
1612 	mbox_cmds = padap->mbox_log->size;
1613 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1614 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1615 	if (rc)
1616 		return rc;
1617 
1618 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1619 	for (k = 0; k < mbox_cmds; k++) {
1620 		entry_idx = log->cursor + k;
1621 		if (entry_idx >= log->size)
1622 			entry_idx -= log->size;
1623 
1624 		entry = mbox_cmd_log_entry(log, entry_idx);
1625 		/* skip over unused entries */
1626 		if (entry->timestamp == 0)
1627 			continue;
1628 
1629 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1630 		for (i = 0; i < MBOX_LEN / 8; i++) {
1631 			flit = entry->cmd[i];
1632 			mboxlog->hi[i] = (u32)(flit >> 32);
1633 			mboxlog->lo[i] = (u32)flit;
1634 		}
1635 		mboxlog++;
1636 	}
1637 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1638 	return rc;
1639 }
1640 
1641 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1642 			       struct cudbg_buffer *dbg_buff,
1643 			       struct cudbg_error *cudbg_err)
1644 {
1645 	struct adapter *padap = pdbg_init->adap;
1646 	struct cudbg_buffer temp_buff = { 0 };
1647 	struct ireg_buf *hma_indr;
1648 	int i, rc, n;
1649 	u32 size;
1650 
1651 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1652 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1653 
1654 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1655 	size = sizeof(struct ireg_buf) * n;
1656 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1657 	if (rc)
1658 		return rc;
1659 
1660 	hma_indr = (struct ireg_buf *)temp_buff.data;
1661 	for (i = 0; i < n; i++) {
1662 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
1663 		u32 *buff = hma_indr->outbuf;
1664 
1665 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1666 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1667 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1668 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1669 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1670 				 buff, hma_fli->ireg_offset_range,
1671 				 hma_fli->ireg_local_offset);
1672 		hma_indr++;
1673 	}
1674 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1675 	return rc;
1676 }
1677