/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

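/* Commit the contents of the scratch buffer to the debug buffer and release
 * the scratch buffer.
 */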
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}

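/* Firmware-assisted collection is only possible when the firmware is up
 * (FW_OK) and backdoor register access is not in use.
 */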
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* Pad the debug buffer with zero bytes so that the entity data is 4-byte
 * aligned, and record the entity's padded size in its header.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

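/* Return a pointer to the i-th entity header in the output buffer; entity
 * indices start at 1, hence the (i - 1) below.
 */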
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

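/* Dump the adapter register map; T4 and T5/T6 register maps differ in size. */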
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

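/* Collect the firmware device log from adapter memory through the driver's
 * memory window.
 */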
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

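/* Collect the CIM logic analyzer capture; the LA configuration register is
 * stored at the start of the buffer, followed by the LA data.
 */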
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

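/* Collect the CIM MA logic analyzer data; t4_cim_read_ma_la() fills two
 * capture regions within the buffer.
 */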
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

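/* Collect the CIM queue configuration (base, size, thresholds) along with the
 * IBQ status registers and OBQ write pointers.
 */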
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq() returns the number of words read or a negative
	 * error code.
	 */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A result of zero or less indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

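/* Query the hardware for the size of the given CIM OBQ and return it in
 * bytes.
 */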
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq() returns the number of words read or a negative
	 * error code.
	 */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A result of zero or less indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

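/* Read the requested adapter memory in CUDBG_CHUNK_SIZE chunks through the
 * NIC memory window and append each chunk to the debug buffer.
 */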
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}

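/* Record the sizes of the EDC0/EDC1 memories and flag which of them are
 * enabled.
 */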
static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
				   struct card_mem *mem_info)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;

	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
	value = EDRAM0_SIZE_G(value);
	mem_info->size_edc0 = (u16)value;

	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
	value = EDRAM1_SIZE_G(value);
	mem_info->size_edc1 = (u16)value;

	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (value & EDRAM0_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC0_FLAG);
	if (value & EDRAM1_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC1_FLAG);
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	struct card_mem mem_info = {0};
	unsigned long flag, size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	cudbg_collect_mem_info(pdbg_init, &mem_info);
	switch (mem_type) {
	case MEM_EDC0:
		flag = (1 << EDC0_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
		break;
	case MEM_EDC1:
		flag = (1 << EDC1_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
		break;
	default:
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	if (mem_info.mem_flag & flag) {
		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
				       size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}
err:
	return rc;
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

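/* Collect the RSS lookup table. */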
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

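/* Collect the TP indirect registers: the TP_PIO, TP_TM_PIO and TP_MIB_INDEX
 * register ranges, selected per chip (T5 vs. T6).
 */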
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

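/* Collect the SGE debug data registers via the two indirect register ranges
 * in t5_sge_dbg_index_array.
 */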
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

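/* Collect TID allocation information.  The firmware-assigned ranges (ETHOFLD,
 * and high-priority filters on T6) are queried over the mailbox; the rest is
 * taken from registers and the driver's tid_info.
 */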
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

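/* Collect the ULP TX logic analyzer read/write pointers and data for each of
 * the CUDBG_NUM_ULPTX channels.
 */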
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

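/* Snapshot the driver's mailbox command log, splitting each 64-bit command
 * flit into hi/lo 32-bit words.
 */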
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}