/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */

#include <linux/sort.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"

static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}

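/* Returns 1 when it's safe to collect debug entities via the firmware
 * mailbox, i.e. firmware is operational and backdoor (direct register)
 * access was not requested.
 */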
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* Pad the debug buffer with zero bytes so that the entity's contents
 * end on a 4-byte boundary.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

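/* Return a pointer to the i-th entity header in the output buffer.
 * Entity headers are laid out back to back immediately after the
 * global cudbg header; note that @i is 1-based.
 */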
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

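/* Read @len bytes of VPD at physical EEPROM address @addr.  The
 * physical address is first translated into an address relative to
 * this PF's VPD area.
 */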
static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

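/* Build a snapshot of the adapter's memory layout: which EDC/MC
 * regions are populated, and how the hardware regions (TP, ULP, SGE,
 * LE, etc.) are carved out of them.
 */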
int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

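/* Dump a single CIM inbound queue.  IBQ qids: 0/1 - TP0/TP1, 2 - ULP,
 * 3/4 - SGE0/SGE1, 5 - NC-SI (see the wrappers below).
 */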
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq() returns the number of words read or a
	 * negative error code.
	 */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A non-positive return value indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

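/* Compute the size in bytes of a CIM outbound queue from its queue
 * configuration registers.
 */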
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

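/* Dump a single CIM outbound queue.  OBQ qids: 0-3 - ULP0-3, 4 - SGE,
 * 5 - NC-SI, 6/7 - SGE RX queue 0/1 (see the wrappers below).
 */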
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq() returns the number of words read or a
	 * negative error code.
	 */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A non-positive return value indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

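/* Copy @tot_len bytes of on-chip memory of type @mem_type into the
 * debug buffer in CUDBG_CHUNK_SIZE pieces, reading through PCIe
 * memory window 0.
 */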
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}

static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
				   struct card_mem *mem_info)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;

	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
	value = EDRAM0_SIZE_G(value);
	mem_info->size_edc0 = (u16)value;

	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
	value = EDRAM1_SIZE_G(value);
	mem_info->size_edc1 = (u16)value;

	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (value & EDRAM0_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC0_FLAG);
	if (value & EDRAM1_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC1_FLAG);
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

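/* Collect the contents of an EDC memory region, provided the region
 * is enabled in the MA target memory map.
 */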
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	struct card_mem mem_info = {0};
	unsigned long flag, size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	cudbg_collect_mem_info(pdbg_init, &mem_info);
	switch (mem_type) {
	case MEM_EDC0:
		flag = (1 << EDC0_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
		break;
	case MEM_EDC1:
		flag = (1 << EDC1_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
		break;
	default:
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	if (mem_info.mem_flag & flag) {
		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
				       size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}
err:
	return rc;
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);
	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_meminfo *meminfo_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_meminfo), &temp_buff);
	if (rc)
		return rc;

	meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
	rc = cudbg_fill_meminfo(padap, meminfo_buff);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

int cudbg_dump_context_size(struct adapter *padap)
{
	u32 value, size;
	u8 flq;

	value = t4_read_reg(padap, SGE_FLM_CFG_A);

	/* Get number of data freelist queues */
	flq = HDRSTARTFLQ_G(value);
	size = CUDBG_MAX_FL_QIDS >> flq;

	/* Add extra space for congestion manager contexts.  The number
	 * of CONM contexts is the same as the number of freelist
	 * queues.
	 */
	size += size;
	return size * sizeof(struct cudbg_ch_cntxt);
}

static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
				enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	/* Under heavy traffic, the SGE Queue contexts registers will be
	 * frequently accessed by firmware.
	 *
	 * To avoid conflicts with firmware, always ask firmware to fetch
	 * the SGE Queue contexts via mailbox. On failure, fallback to
	 * accessing hardware registers directly.
	 */
	if (is_fw_attached(pdbg_init))
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}

int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ch_cntxt *buff;
	u32 size, i = 0;
	int rc;

	rc = cudbg_dump_context_size(padap);
	if (rc <= 0)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	size = rc;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
	while (size > 0) {
		buff->cntxt_type = CTXT_FLM;
		buff->cntxt_id = i;
		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
		buff++;
		size -= sizeof(struct cudbg_ch_cntxt);

		buff->cntxt_type = CTXT_CNM;
		buff->cntxt_id = i;
		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
		buff++;
		size -= sizeof(struct cudbg_ch_cntxt);

		i++;
	}

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}

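/* Read the MPS replication map directly from hardware registers;
 * used as a fallback when the firmware mailbox read fails.
 */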
static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}

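/* Read a single MPS TCAM entry.  On T6 the TCAM is accessed indirectly
 * through the DATA2 control register; older chips expose the Y/X pairs
 * directly.
 */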
1518 static int cudbg_collect_tcam_index(struct adapter *padap,
1519 				    struct cudbg_mps_tcam *tcam, u32 idx)
1520 {
1521 	u64 tcamy, tcamx, val;
1522 	u32 ctl, data2;
1523 	int rc = 0;
1524 
1525 	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
1526 		/* CtlReqID   - 1: use Host Driver Requester ID
1527 		 * CtlCmdType - 0: Read, 1: Write
1528 		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
1529 		 * CtlXYBitSel- 0: Y bit, 1: X bit
1530 		 */
1531 
1532 		/* Read tcamy */
1533 		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
1534 		if (idx < 256)
1535 			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
1536 		else
1537 			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
1538 
1539 		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1540 		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1541 		tcamy = DMACH_G(val) << 32;
1542 		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1543 		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1544 		tcam->lookup_type = DATALKPTYPE_G(data2);
1545 
1546 		/* 0 - Outer header, 1 - Inner header
1547 		 * [71:48] bit locations are overloaded for
1548 		 * outer vs. inner lookup types.
1549 		 */
1550 		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1551 			/* Inner header VNI */
1552 			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1553 			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
1554 			tcam->dip_hit = data2 & DATADIPHIT_F;
1555 		} else {
1556 			tcam->vlan_vld = data2 & DATAVIDH2_F;
1557 			tcam->ivlan = VIDL_G(val);
1558 		}
1559 
1560 		tcam->port_num = DATAPORTNUM_G(data2);
1561 
1562 		/* Read tcamx. Change the control param */
1563 		ctl |= CTLXYBITSEL_V(1);
1564 		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1565 		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1566 		tcamx = DMACH_G(val) << 32;
1567 		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1568 		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1569 		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1570 			/* Inner header VNI mask */
1571 			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1572 			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
1573 		}
1574 	} else {
1575 		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
1576 		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
1577 	}
1578 
1579 	/* If no entry, return */
1580 	if (tcamx & tcamy)
1581 		return rc;
1582 
1583 	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
1584 	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
1585 
1586 	if (is_t5(padap->params.chip))
1587 		tcam->repli = (tcam->cls_lo & REPLICATE_F);
1588 	else if (is_t6(padap->params.chip))
1589 		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
1590 
1591 	if (tcam->repli) {
1592 		struct fw_ldst_cmd ldst_cmd;
1593 		struct fw_ldst_mps_rplc mps_rplc;
1594 
1595 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1596 		ldst_cmd.op_to_addrspace =
1597 			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
1598 			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
1599 			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
1600 		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
1601 		ldst_cmd.u.mps.rplc.fid_idx =
1602 			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
1603 			      FW_LDST_CMD_IDX_V(idx));
1604 
1605 		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1606 				&ldst_cmd);
1607 		if (rc)
1608 			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
1609 		else
1610 			mps_rplc = ldst_cmd.u.mps.rplc;
1611 
1612 		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
1613 		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
1614 		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
1615 		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
1616 		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
1617 			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
1618 			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
1619 			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
1620 			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
1621 		}
1622 	}
1623 	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
1624 	tcam->idx = idx;
1625 	tcam->rplc_size = padap->params.arch.mps_rplc_size;
1626 	return rc;
1627 }
1628 
1629 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1630 			   struct cudbg_buffer *dbg_buff,
1631 			   struct cudbg_error *cudbg_err)
1632 {
1633 	struct adapter *padap = pdbg_init->adap;
1634 	struct cudbg_buffer temp_buff = { 0 };
1635 	u32 size = 0, i, n, total_size = 0;
1636 	struct cudbg_mps_tcam *tcam;
1637 	int rc;
1638 
1639 	n = padap->params.arch.mps_tcam_size;
1640 	size = sizeof(struct cudbg_mps_tcam) * n;
1641 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1642 	if (rc)
1643 		return rc;
1644 
1645 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1646 	for (i = 0; i < n; i++) {
1647 		rc = cudbg_collect_tcam_index(padap, tcam, i);
1648 		if (rc) {
1649 			cudbg_err->sys_err = rc;
1650 			cudbg_put_buff(&temp_buff, dbg_buff);
1651 			return rc;
1652 		}
1653 		total_size += sizeof(struct cudbg_mps_tcam);
1654 		tcam++;
1655 	}
1656 
1657 	if (!total_size) {
1658 		rc = CUDBG_SYSTEM_ERROR;
1659 		cudbg_err->sys_err = rc;
1660 		cudbg_put_buff(&temp_buff, dbg_buff);
1661 		return rc;
1662 	}
1663 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1664 	return rc;
1665 }
1666 
1667 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
1668 			   struct cudbg_buffer *dbg_buff,
1669 			   struct cudbg_error *cudbg_err)
1670 {
1671 	struct adapter *padap = pdbg_init->adap;
1672 	struct cudbg_buffer temp_buff = { 0 };
1673 	char vpd_str[CUDBG_VPD_VER_LEN + 1];
1674 	u32 scfg_vers, vpd_vers, fw_vers;
1675 	struct cudbg_vpd_data *vpd_data;
1676 	struct vpd_params vpd = { 0 };
1677 	int rc, ret;
1678 
1679 	rc = t4_get_raw_vpd_params(padap, &vpd);
1680 	if (rc)
1681 		return rc;
1682 
1683 	rc = t4_get_fw_version(padap, &fw_vers);
1684 	if (rc)
1685 		return rc;
1686 
1687 	/* Serial Configuration Version is located beyond the PF's vpd size.
1688 	 * Temporarily give access to entire EEPROM to get it.
1689 	 */
1690 	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
1691 	if (rc < 0)
1692 		return rc;
1693 
1694 	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
1695 				 &scfg_vers);
1696 
1697 	/* Restore back to original PF's vpd size */
1698 	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
1699 	if (rc < 0)
1700 		return rc;
1701 
1702 	if (ret)
1703 		return ret;
1704 
1705 	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
1706 				vpd_str);
1707 	if (rc)
1708 		return rc;
1709 
1710 	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
1711 	rc = kstrtouint(vpd_str, 0, &vpd_vers);
1712 	if (rc)
1713 		return rc;
1714 
1715 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
1716 			    &temp_buff);
1717 	if (rc)
1718 		return rc;
1719 
1720 	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
1721 	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
1722 	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
1723 	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
1724 	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
1725 	vpd_data->scfg_vers = scfg_vers;
1726 	vpd_data->vpd_vers = vpd_vers;
1727 	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
1728 	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
1729 	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
1730 	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
1731 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1732 	return rc;
1733 }
1734 
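/* Read a single tid entry from the Lookup Engine (LE) through the DBGI
 * debug interface: issue the command, poll for completion, then copy out
 * the response data.
 */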
1735 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
1736 			  struct cudbg_tid_data *tid_data)
1737 {
1738 	struct adapter *padap = pdbg_init->adap;
1739 	int i, cmd_retry = 8;
1740 	u32 val;
1741 
	/* Zero out the REQ_DATA registers */
1743 	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
1744 		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
1745 
	/* Write the DBGI command */
1747 	val = DBGICMD_V(4) | DBGITID_V(tid);
1748 	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
1749 	tid_data->dbig_cmd = val;
1750 
1751 	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
1752 	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
1753 	tid_data->dbig_conf = val;
1754 
	/* Poll the DBGICMDBUSY bit until the command completes */
1756 	val = 1;
1757 	while (val) {
1758 		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
1759 		val = val & DBGICMDBUSY_F;
1760 		cmd_retry--;
1761 		if (!cmd_retry)
1762 			return CUDBG_SYSTEM_ERROR;
1763 	}
1764 
	/* Check RESP status; bit 0 must be set for a valid response */
1766 	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
1767 	tid_data->dbig_rsp_stat = val;
1768 	if (!(val & 1))
1769 		return CUDBG_SYSTEM_ERROR;
1770 
1771 	/* Read RESP data */
1772 	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
1773 		tid_data->data[i] = t4_read_reg(padap,
1774 						LE_DB_DBGI_RSP_DATA_A +
1775 						(i << 2));
1776 	tid_data->tid = tid;
1777 	return 0;
1778 }
1779 
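/* Classify a tid by the LE TCAM region its index falls into, using the
 * region boundaries recorded in @tcam_region.
 */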
1780 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
1781 {
1782 	int type = LE_ET_UNKNOWN;
1783 
1784 	if (tid < tcam_region.server_start)
1785 		type = LE_ET_TCAM_CON;
1786 	else if (tid < tcam_region.filter_start)
1787 		type = LE_ET_TCAM_SERVER;
1788 	else if (tid < tcam_region.clip_start)
1789 		type = LE_ET_TCAM_FILTER;
1790 	else if (tid < tcam_region.routing_start)
1791 		type = LE_ET_TCAM_CLIP;
1792 	else if (tid < tcam_region.tid_hash_base)
1793 		type = LE_ET_TCAM_ROUTING;
1794 	else if (tid < tcam_region.max_tid)
1795 		type = LE_ET_HASH_CON;
1796 	else
1797 		type = LE_ET_INVALID_TID;
1798 
1799 	return type;
1800 }
1801 
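/* Return whether a tid entry holds an IPv6 tuple.  Since IPv6 entries
 * occupy two tids, only even tids can start one.
 */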
1802 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
1803 			       struct cudbg_tcam tcam_region)
1804 {
1805 	int ipv6 = 0;
1806 	int le_type;
1807 
1808 	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
1809 	if (tid_data->tid & 1)
1810 		return 0;
1811 
1812 	if (le_type == LE_ET_HASH_CON) {
1813 		ipv6 = tid_data->data[16] & 0x8000;
1814 	} else if (le_type == LE_ET_TCAM_CON) {
1815 		ipv6 = tid_data->data[16] & 0x8000;
1816 		if (ipv6)
1817 			ipv6 = tid_data->data[9] == 0x00C00000;
1818 	} else {
1819 		ipv6 = 0;
1820 	}
1821 	return ipv6;
1822 }
1823 
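/* Fill in the LE TCAM region boundaries (server, filter, CLIP, routing
 * and hash) and compute the maximum tid from the hardware registers.
 */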
1824 void cudbg_fill_le_tcam_info(struct adapter *padap,
1825 			     struct cudbg_tcam *tcam_region)
1826 {
1827 	u32 value;
1828 
1829 	/* Get the LE regions */
1830 	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
1831 	tcam_region->tid_hash_base = value;
1832 
1833 	/* Get routing table index */
1834 	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
1835 	tcam_region->routing_start = value;
1836 
	/* Get clip table index */
1838 	value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
1839 	tcam_region->clip_start = value;
1840 
1841 	/* Get filter table index */
1842 	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
1843 	tcam_region->filter_start = value;
1844 
1845 	/* Get server table index */
1846 	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
1847 	tcam_region->server_start = value;
1848 
1849 	/* Check whether hash is enabled and calculate the max tids */
1850 	value = t4_read_reg(padap, LE_DB_CONFIG_A);
1851 	if ((value >> HASHEN_S) & 1) {
1852 		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
1853 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1854 			tcam_region->max_tid = (value & 0xFFFFF) +
1855 					       tcam_region->tid_hash_base;
1856 		} else {
1857 			value = HASHTIDSIZE_G(value);
1858 			value = 1 << value;
1859 			tcam_region->max_tid = value +
1860 					       tcam_region->tid_hash_base;
1861 		}
1862 	} else { /* hash not enabled */
1863 		tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
1864 	}
1865 }
1866 
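/* Collect the LE TCAM: dump the region layout followed by a DBGI readout
 * of every tid up to the computed maximum.
 */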
1867 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
1868 			  struct cudbg_buffer *dbg_buff,
1869 			  struct cudbg_error *cudbg_err)
1870 {
1871 	struct adapter *padap = pdbg_init->adap;
1872 	struct cudbg_buffer temp_buff = { 0 };
1873 	struct cudbg_tcam tcam_region = { 0 };
1874 	struct cudbg_tid_data *tid_data;
1875 	u32 bytes = 0;
1876 	int rc, size;
1877 	u32 i;
1878 
1879 	cudbg_fill_le_tcam_info(padap, &tcam_region);
1880 
1881 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
1882 	size += sizeof(struct cudbg_tcam);
1883 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1884 	if (rc)
1885 		return rc;
1886 
1887 	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
1888 	bytes = sizeof(struct cudbg_tcam);
1889 	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
	/* Read all tids */
1891 	for (i = 0; i < tcam_region.max_tid; ) {
1892 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
1893 		if (rc) {
1894 			cudbg_err->sys_err = rc;
1895 			cudbg_put_buff(&temp_buff, dbg_buff);
1896 			return rc;
1897 		}
1898 
		/* IPv6 entries occupy two tids, so skip ahead accordingly */
		if (cudbg_is_ipv6_entry(tid_data, tcam_region))
			i += 2;
		else
			i++;
1901 
1902 		tid_data++;
1903 		bytes += sizeof(struct cudbg_tid_data);
1904 	}
1905 
1906 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1907 	return rc;
1908 }
1909 
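/* Collect the congestion control table: NMTUS x NCCTRL_WIN 16-bit entries
 * read via t4_read_cong_tbl().
 */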
1910 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1911 			struct cudbg_buffer *dbg_buff,
1912 			struct cudbg_error *cudbg_err)
1913 {
1914 	struct adapter *padap = pdbg_init->adap;
1915 	struct cudbg_buffer temp_buff = { 0 };
1916 	u32 size;
1917 	int rc;
1918 
1919 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1920 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1921 	if (rc)
1922 		return rc;
1923 
1924 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
1925 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1926 	return rc;
1927 }
1928 
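/* Collect MA indirect registers.  Available from T6 onwards; earlier
 * chips return CUDBG_STATUS_ENTITY_NOT_FOUND.
 */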
1929 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1930 			      struct cudbg_buffer *dbg_buff,
1931 			      struct cudbg_error *cudbg_err)
1932 {
1933 	struct adapter *padap = pdbg_init->adap;
1934 	struct cudbg_buffer temp_buff = { 0 };
1935 	struct ireg_buf *ma_indr;
1936 	int i, rc, n;
1937 	u32 size, j;
1938 
1939 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1940 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1941 
1942 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1943 	size = sizeof(struct ireg_buf) * n * 2;
1944 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1945 	if (rc)
1946 		return rc;
1947 
1948 	ma_indr = (struct ireg_buf *)temp_buff.data;
1949 	for (i = 0; i < n; i++) {
1950 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1951 		u32 *buff = ma_indr->outbuf;
1952 
1953 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1954 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1955 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1956 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1957 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1958 				 buff, ma_fli->ireg_offset_range,
1959 				 ma_fli->ireg_local_offset);
1960 		ma_indr++;
1961 	}
1962 
1963 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1964 	for (i = 0; i < n; i++) {
1965 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1966 		u32 *buff = ma_indr->outbuf;
1967 
1968 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1969 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1970 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1971 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1972 			t4_read_indirect(padap, ma_fli->ireg_addr,
1973 					 ma_fli->ireg_data, buff, 1,
1974 					 ma_fli->ireg_local_offset);
1975 			buff++;
1976 			ma_fli->ireg_local_offset += 0x20;
1977 		}
1978 		ma_indr++;
1979 	}
1980 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1981 	return rc;
1982 }
1983 
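/* Collect the ULP TX logic analyzer state: the read/write pointers and
 * read-data words for each of the CUDBG_NUM_ULPTX channels.
 */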
1984 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
1985 			   struct cudbg_buffer *dbg_buff,
1986 			   struct cudbg_error *cudbg_err)
1987 {
1988 	struct adapter *padap = pdbg_init->adap;
1989 	struct cudbg_buffer temp_buff = { 0 };
1990 	struct cudbg_ulptx_la *ulptx_la_buff;
1991 	u32 i, j;
1992 	int rc;
1993 
1994 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
1995 			    &temp_buff);
1996 	if (rc)
1997 		return rc;
1998 
1999 	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
2000 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
2001 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
2002 						      ULP_TX_LA_RDPTR_0_A +
2003 						      0x10 * i);
2004 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
2005 						      ULP_TX_LA_WRPTR_0_A +
2006 						      0x10 * i);
2007 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
2008 						       ULP_TX_LA_RDDATA_0_A +
2009 						       0x10 * i);
2010 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
2011 			ulptx_la_buff->rd_data[i][j] =
2012 				t4_read_reg(padap,
2013 					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
2014 	}
2015 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
2016 	return rc;
2017 }
2018 
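/* Collect UP CIM indirect registers, selecting the T5 or T6 register
 * table based on the chip revision.
 */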
2019 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2020 				  struct cudbg_buffer *dbg_buff,
2021 				  struct cudbg_error *cudbg_err)
2022 {
2023 	struct adapter *padap = pdbg_init->adap;
2024 	struct cudbg_buffer temp_buff = { 0 };
2025 	struct ireg_buf *up_cim;
2026 	int i, rc, n;
2027 	u32 size;
2028 
2029 	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
2030 	size = sizeof(struct ireg_buf) * n;
2031 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
2032 	if (rc)
2033 		return rc;
2034 
2035 	up_cim = (struct ireg_buf *)temp_buff.data;
2036 	for (i = 0; i < n; i++) {
2037 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2038 		u32 *buff = up_cim->outbuf;
2039 
2040 		if (is_t5(padap->params.chip)) {
2041 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2042 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2043 			up_cim_reg->ireg_local_offset =
2044 						t5_up_cim_reg_array[i][2];
2045 			up_cim_reg->ireg_offset_range =
2046 						t5_up_cim_reg_array[i][3];
2047 		} else if (is_t6(padap->params.chip)) {
2048 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2049 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
2050 			up_cim_reg->ireg_local_offset =
2051 						t6_up_cim_reg_array[i][2];
2052 			up_cim_reg->ireg_offset_range =
2053 						t6_up_cim_reg_array[i][3];
2054 		}
2055 
2056 		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
2057 				 up_cim_reg->ireg_offset_range, buff);
2058 		if (rc) {
2059 			cudbg_put_buff(&temp_buff, dbg_buff);
2060 			return rc;
2061 		}
2062 		up_cim++;
2063 	}
2064 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
2065 	return rc;
2066 }
2067 
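/* Collect the PBT tables through the CIM: dynamic entries, static
 * entries, LRF entries and PBT data.
 */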
2068 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
2069 			     struct cudbg_buffer *dbg_buff,
2070 			     struct cudbg_error *cudbg_err)
2071 {
2072 	struct adapter *padap = pdbg_init->adap;
2073 	struct cudbg_buffer temp_buff = { 0 };
2074 	struct cudbg_pbt_tables *pbt;
2075 	int i, rc;
2076 	u32 addr;
2077 
2078 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
2079 			    &temp_buff);
2080 	if (rc)
2081 		return rc;
2082 
2083 	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
2084 	/* PBT dynamic entries */
2085 	addr = CUDBG_CHAC_PBT_ADDR;
2086 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
2087 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2088 				 &pbt->pbt_dynamic[i]);
2089 		if (rc) {
2090 			cudbg_err->sys_err = rc;
2091 			cudbg_put_buff(&temp_buff, dbg_buff);
2092 			return rc;
2093 		}
2094 	}
2095 
	/* PBT static entries; they start at the address with bit 6 set */
2098 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
2099 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
2100 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2101 				 &pbt->pbt_static[i]);
2102 		if (rc) {
2103 			cudbg_err->sys_err = rc;
2104 			cudbg_put_buff(&temp_buff, dbg_buff);
2105 			return rc;
2106 		}
2107 	}
2108 
2109 	/* LRF entries */
2110 	addr = CUDBG_CHAC_PBT_LRF;
2111 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
2112 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2113 				 &pbt->lrf_table[i]);
2114 		if (rc) {
2115 			cudbg_err->sys_err = rc;
2116 			cudbg_put_buff(&temp_buff, dbg_buff);
2117 			return rc;
2118 		}
2119 	}
2120 
2121 	/* PBT data entries */
2122 	addr = CUDBG_CHAC_PBT_DATA;
2123 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
2124 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2125 				 &pbt->pbt_data[i]);
2126 		if (rc) {
2127 			cudbg_err->sys_err = rc;
2128 			cudbg_put_buff(&temp_buff, dbg_buff);
2129 			return rc;
2130 		}
2131 	}
2132 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
2133 	return rc;
2134 }
2135 
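/* Collect the mailbox command log: copy each used entry starting at the
 * log cursor and split every 64-bit command flit into hi/lo words.
 */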
2136 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
2137 			   struct cudbg_buffer *dbg_buff,
2138 			   struct cudbg_error *cudbg_err)
2139 {
2140 	struct adapter *padap = pdbg_init->adap;
2141 	struct cudbg_mbox_log *mboxlog = NULL;
2142 	struct cudbg_buffer temp_buff = { 0 };
2143 	struct mbox_cmd_log *log = NULL;
2144 	struct mbox_cmd *entry;
2145 	unsigned int entry_idx;
2146 	u16 mbox_cmds;
2147 	int i, k, rc;
2148 	u64 flit;
2149 	u32 size;
2150 
2151 	log = padap->mbox_log;
2152 	mbox_cmds = padap->mbox_log->size;
2153 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
2154 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
2155 	if (rc)
2156 		return rc;
2157 
2158 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
2159 	for (k = 0; k < mbox_cmds; k++) {
2160 		entry_idx = log->cursor + k;
2161 		if (entry_idx >= log->size)
2162 			entry_idx -= log->size;
2163 
2164 		entry = mbox_cmd_log_entry(log, entry_idx);
2165 		/* skip over unused entries */
2166 		if (entry->timestamp == 0)
2167 			continue;
2168 
2169 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
2170 		for (i = 0; i < MBOX_LEN / 8; i++) {
2171 			flit = entry->cmd[i];
2172 			mboxlog->hi[i] = (u32)(flit >> 32);
2173 			mboxlog->lo[i] = (u32)flit;
2174 		}
2175 		mboxlog++;
2176 	}
2177 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
2178 	return rc;
2179 }
2180 
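/* Collect HMA indirect registers.  Available from T6 onwards; earlier
 * chips return CUDBG_STATUS_ENTITY_NOT_FOUND.
 */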
2181 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
2182 			       struct cudbg_buffer *dbg_buff,
2183 			       struct cudbg_error *cudbg_err)
2184 {
2185 	struct adapter *padap = pdbg_init->adap;
2186 	struct cudbg_buffer temp_buff = { 0 };
2187 	struct ireg_buf *hma_indr;
2188 	int i, rc, n;
2189 	u32 size;
2190 
2191 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2192 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
2193 
2194 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2195 	size = sizeof(struct ireg_buf) * n;
2196 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
2197 	if (rc)
2198 		return rc;
2199 
2200 	hma_indr = (struct ireg_buf *)temp_buff.data;
2201 	for (i = 0; i < n; i++) {
2202 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
2203 		u32 *buff = hma_indr->outbuf;
2204 
2205 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
2206 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
2207 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
2208 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
2209 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
2210 				 buff, hma_fli->ireg_offset_range,
2211 				 hma_fli->ireg_local_offset);
2212 		hma_indr++;
2213 	}
2214 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
2215 	return rc;
2216 }
2217