1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include <linux/sort.h>
19 
20 #include "t4_regs.h"
21 #include "cxgb4.h"
22 #include "cxgb4_cudbg.h"
23 #include "cudbg_if.h"
24 #include "cudbg_lib_common.h"
25 #include "cudbg_entity.h"
26 #include "cudbg_lib.h"
27 #include "cudbg_zlib.h"
28 
29 static int cudbg_do_compression(struct cudbg_init *pdbg_init,
30 				struct cudbg_buffer *pin_buff,
31 				struct cudbg_buffer *dbg_buff)
32 {
33 	struct cudbg_buffer temp_in_buff = { 0 };
34 	int bytes_left, bytes_read, bytes;
35 	u32 offset = dbg_buff->offset;
36 	int rc;
37 
38 	temp_in_buff.offset = pin_buff->offset;
39 	temp_in_buff.data = pin_buff->data;
40 	temp_in_buff.size = pin_buff->size;
41 
42 	bytes_left = pin_buff->size;
43 	bytes_read = 0;
44 	while (bytes_left > 0) {
45 		/* Do compression in smaller chunks */
46 		bytes = min_t(unsigned long, bytes_left,
47 			      (unsigned long)CUDBG_CHUNK_SIZE);
48 		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
49 		temp_in_buff.size = bytes;
50 		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
51 		if (rc)
52 			return rc;
53 		bytes_left -= bytes;
54 		bytes_read += bytes;
55 	}
56 
57 	pin_buff->size = dbg_buff->offset - offset;
58 	return 0;
59 }
60 
61 static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
62 					struct cudbg_buffer *pin_buff,
63 					struct cudbg_buffer *dbg_buff)
64 {
65 	int rc = 0;
66 
67 	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
68 		cudbg_update_buff(pin_buff, dbg_buff);
69 	} else {
70 		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
71 		if (rc)
72 			goto out;
73 	}
74 
75 out:
76 	cudbg_put_buff(pdbg_init, pin_buff);
77 	return rc;
78 }
79 
80 static int is_fw_attached(struct cudbg_init *pdbg_init)
81 {
82 	struct adapter *padap = pdbg_init->adap;
83 
84 	if (!(padap->flags & FW_OK) || padap->use_bd)
85 		return 0;
86 
87 	return 1;
88 }
89 
90 /* This function will add additional padding bytes into debug_buffer to make it
91  * 4 byte aligned.
92  */
93 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
94 			      struct cudbg_entity_hdr *entity_hdr)
95 {
96 	u8 zero_buf[4] = {0};
97 	u8 padding, remain;
98 
99 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
100 	padding = 4 - remain;
101 	if (remain) {
102 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
103 		       padding);
104 		dbg_buff->offset += padding;
105 		entity_hdr->num_pad = padding;
106 	}
107 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
108 }
109 
110 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
111 {
112 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
113 
114 	return (struct cudbg_entity_hdr *)
115 	       ((char *)outbuf + cudbg_hdr->hdr_len +
116 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
117 }
118 
119 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
120 			      void *dest)
121 {
122 	int vaddr, rc;
123 
124 	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
125 	if (vaddr < 0)
126 		return vaddr;
127 
128 	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
129 	if (rc < 0)
130 		return rc;
131 
132 	return 0;
133 }
134 
135 static int cudbg_mem_desc_cmp(const void *a, const void *b)
136 {
137 	return ((const struct cudbg_mem_desc *)a)->base -
138 	       ((const struct cudbg_mem_desc *)b)->base;
139 }
140 
141 int cudbg_fill_meminfo(struct adapter *padap,
142 		       struct cudbg_meminfo *meminfo_buff)
143 {
144 	struct cudbg_mem_desc *md;
145 	u32 lo, hi, used, alloc;
146 	int n, i;
147 
148 	memset(meminfo_buff->avail, 0,
149 	       ARRAY_SIZE(meminfo_buff->avail) *
150 	       sizeof(struct cudbg_mem_desc));
151 	memset(meminfo_buff->mem, 0,
152 	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
153 	md  = meminfo_buff->mem;
154 
155 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
156 		meminfo_buff->mem[i].limit = 0;
157 		meminfo_buff->mem[i].idx = i;
158 	}
159 
160 	/* Find and sort the populated memory ranges */
161 	i = 0;
162 	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
163 	if (lo & EDRAM0_ENABLE_F) {
164 		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
165 		meminfo_buff->avail[i].base =
166 			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
167 		meminfo_buff->avail[i].limit =
168 			meminfo_buff->avail[i].base +
169 			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
170 		meminfo_buff->avail[i].idx = 0;
171 		i++;
172 	}
173 
174 	if (lo & EDRAM1_ENABLE_F) {
175 		hi =  t4_read_reg(padap, MA_EDRAM1_BAR_A);
176 		meminfo_buff->avail[i].base =
177 			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
178 		meminfo_buff->avail[i].limit =
179 			meminfo_buff->avail[i].base +
180 			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
181 		meminfo_buff->avail[i].idx = 1;
182 		i++;
183 	}
184 
185 	if (is_t5(padap->params.chip)) {
186 		if (lo & EXT_MEM0_ENABLE_F) {
187 			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
188 			meminfo_buff->avail[i].base =
189 				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
190 			meminfo_buff->avail[i].limit =
191 				meminfo_buff->avail[i].base +
192 				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
193 			meminfo_buff->avail[i].idx = 3;
194 			i++;
195 		}
196 
197 		if (lo & EXT_MEM1_ENABLE_F) {
198 			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
199 			meminfo_buff->avail[i].base =
200 				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
201 			meminfo_buff->avail[i].limit =
202 				meminfo_buff->avail[i].base +
203 				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
204 			meminfo_buff->avail[i].idx = 4;
205 			i++;
206 		}
207 	} else {
208 		if (lo & EXT_MEM_ENABLE_F) {
209 			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
210 			meminfo_buff->avail[i].base =
211 				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
212 			meminfo_buff->avail[i].limit =
213 				meminfo_buff->avail[i].base +
214 				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
215 			meminfo_buff->avail[i].idx = 2;
216 			i++;
217 		}
218 
219 		if (lo & HMA_MUX_F) {
220 			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
221 			meminfo_buff->avail[i].base =
222 				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
223 			meminfo_buff->avail[i].limit =
224 				meminfo_buff->avail[i].base +
225 				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
226 			meminfo_buff->avail[i].idx = 5;
227 			i++;
228 		}
229 	}
230 
231 	if (!i) /* no memory available */
232 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
233 
234 	meminfo_buff->avail_c = i;
235 	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
236 	     cudbg_mem_desc_cmp, NULL);
237 	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
238 	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
239 	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
240 	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
241 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
242 	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
243 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
244 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
245 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
246 
247 	/* the next few have explicit upper bounds */
248 	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
249 	md->limit = md->base - 1 +
250 		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
251 		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
252 	md++;
253 
254 	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
255 	md->limit = md->base - 1 +
256 		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
257 		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
258 	md++;
259 
260 	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
261 		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
262 			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
263 			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
264 		} else {
265 			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
266 			md->base = t4_read_reg(padap,
267 					       LE_DB_HASH_TBL_BASE_ADDR_A);
268 		}
269 		md->limit = 0;
270 	} else {
271 		md->base = 0;
272 		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
273 	}
274 	md++;
275 
276 #define ulp_region(reg) do { \
277 	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
278 	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
279 } while (0)
280 
281 	ulp_region(RX_ISCSI);
282 	ulp_region(RX_TDDP);
283 	ulp_region(TX_TPT);
284 	ulp_region(RX_STAG);
285 	ulp_region(RX_RQ);
286 	ulp_region(RX_RQUDP);
287 	ulp_region(RX_PBL);
288 	ulp_region(TX_PBL);
289 #undef ulp_region
290 	md->base = 0;
291 	md->idx = ARRAY_SIZE(cudbg_region);
292 	if (!is_t4(padap->params.chip)) {
293 		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
294 		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
295 		u32 size = 0;
296 
297 		if (is_t5(padap->params.chip)) {
298 			if (sge_ctrl & VFIFO_ENABLE_F)
299 				size = DBVFIFO_SIZE_G(fifo_size);
300 		} else {
301 			size = T6_DBVFIFO_SIZE_G(fifo_size);
302 		}
303 
304 		if (size) {
305 			md->base = BASEADDR_G(t4_read_reg(padap,
306 							  SGE_DBVFIFO_BADDR_A));
307 			md->limit = md->base + (size << 2) - 1;
308 		}
309 	}
310 
311 	md++;
312 
313 	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
314 	md->limit = 0;
315 	md++;
316 	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
317 	md->limit = 0;
318 	md++;
319 
320 	md->base = padap->vres.ocq.start;
321 	if (padap->vres.ocq.size)
322 		md->limit = md->base + padap->vres.ocq.size - 1;
323 	else
324 		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
325 	md++;
326 
327 	/* add any address-space holes, there can be up to 3 */
328 	for (n = 0; n < i - 1; n++)
329 		if (meminfo_buff->avail[n].limit <
330 		    meminfo_buff->avail[n + 1].base)
331 			(md++)->base = meminfo_buff->avail[n].limit;
332 
333 	if (meminfo_buff->avail[n].limit)
334 		(md++)->base = meminfo_buff->avail[n].limit;
335 
336 	n = md - meminfo_buff->mem;
337 	meminfo_buff->mem_c = n;
338 
339 	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
340 	     cudbg_mem_desc_cmp, NULL);
341 
342 	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
343 	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
344 	meminfo_buff->up_ram_lo = lo;
345 	meminfo_buff->up_ram_hi = hi;
346 
347 	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
348 	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
349 	meminfo_buff->up_extmem2_lo = lo;
350 	meminfo_buff->up_extmem2_hi = hi;
351 
352 	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
353 	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
354 		meminfo_buff->free_rx_cnt +=
355 			FREERXPAGECOUNT_G(t4_read_reg(padap,
356 						      TP_FLM_FREE_RX_CNT_A));
357 
358 	meminfo_buff->rx_pages_data[0] =  PMRXMAXPAGE_G(lo);
359 	meminfo_buff->rx_pages_data[1] =
360 		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
361 	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
362 
363 	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
364 	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
365 	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
366 		meminfo_buff->free_tx_cnt +=
367 			FREETXPAGECOUNT_G(t4_read_reg(padap,
368 						      TP_FLM_FREE_TX_CNT_A));
369 
370 	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
371 	meminfo_buff->tx_pages_data[1] =
372 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
373 	meminfo_buff->tx_pages_data[2] =
374 		hi >= (1 << 20) ? 'M' : 'K';
375 	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
376 
377 	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
378 	meminfo_buff->p_structs_free_cnt =
379 		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
380 
381 	for (i = 0; i < 4; i++) {
382 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
383 			lo = t4_read_reg(padap,
384 					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
385 		else
386 			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
387 		if (is_t5(padap->params.chip)) {
388 			used = T5_USED_G(lo);
389 			alloc = T5_ALLOC_G(lo);
390 		} else {
391 			used = USED_G(lo);
392 			alloc = ALLOC_G(lo);
393 		}
394 		meminfo_buff->port_used[i] = used;
395 		meminfo_buff->port_alloc[i] = alloc;
396 	}
397 
398 	for (i = 0; i < padap->params.arch.nchan; i++) {
399 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
400 			lo = t4_read_reg(padap,
401 					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
402 		else
403 			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
404 		if (is_t5(padap->params.chip)) {
405 			used = T5_USED_G(lo);
406 			alloc = T5_ALLOC_G(lo);
407 		} else {
408 			used = USED_G(lo);
409 			alloc = ALLOC_G(lo);
410 		}
411 		meminfo_buff->loopback_used[i] = used;
412 		meminfo_buff->loopback_alloc[i] = alloc;
413 	}
414 
415 	return 0;
416 }
417 
418 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
419 			   struct cudbg_buffer *dbg_buff,
420 			   struct cudbg_error *cudbg_err)
421 {
422 	struct adapter *padap = pdbg_init->adap;
423 	struct cudbg_buffer temp_buff = { 0 };
424 	u32 buf_size = 0;
425 	int rc = 0;
426 
427 	if (is_t4(padap->params.chip))
428 		buf_size = T4_REGMAP_SIZE;
429 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
430 		buf_size = T5_REGMAP_SIZE;
431 
432 	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
433 	if (rc)
434 		return rc;
435 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
436 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
437 }
438 
439 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
440 			    struct cudbg_buffer *dbg_buff,
441 			    struct cudbg_error *cudbg_err)
442 {
443 	struct adapter *padap = pdbg_init->adap;
444 	struct cudbg_buffer temp_buff = { 0 };
445 	struct devlog_params *dparams;
446 	int rc = 0;
447 
448 	rc = t4_init_devlog_params(padap);
449 	if (rc < 0) {
450 		cudbg_err->sys_err = rc;
451 		return rc;
452 	}
453 
454 	dparams = &padap->params.devlog;
455 	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
456 	if (rc)
457 		return rc;
458 
459 	/* Collect FW devlog */
460 	if (dparams->start != 0) {
461 		spin_lock(&padap->win0_lock);
462 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
463 				  dparams->memtype, dparams->start,
464 				  dparams->size,
465 				  (__be32 *)(char *)temp_buff.data,
466 				  1);
467 		spin_unlock(&padap->win0_lock);
468 		if (rc) {
469 			cudbg_err->sys_err = rc;
470 			cudbg_put_buff(pdbg_init, &temp_buff);
471 			return rc;
472 		}
473 	}
474 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
475 }
476 
477 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
478 			 struct cudbg_buffer *dbg_buff,
479 			 struct cudbg_error *cudbg_err)
480 {
481 	struct adapter *padap = pdbg_init->adap;
482 	struct cudbg_buffer temp_buff = { 0 };
483 	int size, rc;
484 	u32 cfg = 0;
485 
486 	if (is_t6(padap->params.chip)) {
487 		size = padap->params.cim_la_size / 10 + 1;
488 		size *= 10 * sizeof(u32);
489 	} else {
490 		size = padap->params.cim_la_size / 8;
491 		size *= 8 * sizeof(u32);
492 	}
493 
494 	size += sizeof(cfg);
495 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
496 	if (rc)
497 		return rc;
498 
499 	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
500 	if (rc) {
501 		cudbg_err->sys_err = rc;
502 		cudbg_put_buff(pdbg_init, &temp_buff);
503 		return rc;
504 	}
505 
506 	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
507 	rc = t4_cim_read_la(padap,
508 			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
509 			    NULL);
510 	if (rc < 0) {
511 		cudbg_err->sys_err = rc;
512 		cudbg_put_buff(pdbg_init, &temp_buff);
513 		return rc;
514 	}
515 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
516 }
517 
518 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
519 			    struct cudbg_buffer *dbg_buff,
520 			    struct cudbg_error *cudbg_err)
521 {
522 	struct adapter *padap = pdbg_init->adap;
523 	struct cudbg_buffer temp_buff = { 0 };
524 	int size, rc;
525 
526 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
527 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
528 	if (rc)
529 		return rc;
530 
531 	t4_cim_read_ma_la(padap,
532 			  (u32 *)temp_buff.data,
533 			  (u32 *)((char *)temp_buff.data +
534 				  5 * CIM_MALA_SIZE));
535 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
536 }
537 
538 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
539 			   struct cudbg_buffer *dbg_buff,
540 			   struct cudbg_error *cudbg_err)
541 {
542 	struct adapter *padap = pdbg_init->adap;
543 	struct cudbg_buffer temp_buff = { 0 };
544 	struct cudbg_cim_qcfg *cim_qcfg_data;
545 	int rc;
546 
547 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
548 			    &temp_buff);
549 	if (rc)
550 		return rc;
551 
552 	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
553 	cim_qcfg_data->chip = padap->params.chip;
554 	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
555 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
556 	if (rc) {
557 		cudbg_err->sys_err = rc;
558 		cudbg_put_buff(pdbg_init, &temp_buff);
559 		return rc;
560 	}
561 
562 	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
563 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
564 			 cim_qcfg_data->obq_wr);
565 	if (rc) {
566 		cudbg_err->sys_err = rc;
567 		cudbg_put_buff(pdbg_init, &temp_buff);
568 		return rc;
569 	}
570 
571 	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
572 			 cim_qcfg_data->thres);
573 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
574 }
575 
576 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
577 			      struct cudbg_buffer *dbg_buff,
578 			      struct cudbg_error *cudbg_err, int qid)
579 {
580 	struct adapter *padap = pdbg_init->adap;
581 	struct cudbg_buffer temp_buff = { 0 };
582 	int no_of_read_words, rc = 0;
583 	u32 qsize;
584 
585 	/* collect CIM IBQ */
586 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
587 	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
588 	if (rc)
589 		return rc;
590 
591 	/* t4_read_cim_ibq will return no. of read words or error */
592 	no_of_read_words = t4_read_cim_ibq(padap, qid,
593 					   (u32 *)temp_buff.data, qsize);
594 	/* no_of_read_words is less than or equal to 0 means error */
595 	if (no_of_read_words <= 0) {
596 		if (!no_of_read_words)
597 			rc = CUDBG_SYSTEM_ERROR;
598 		else
599 			rc = no_of_read_words;
600 		cudbg_err->sys_err = rc;
601 		cudbg_put_buff(pdbg_init, &temp_buff);
602 		return rc;
603 	}
604 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
605 }
606 
607 int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
608 			      struct cudbg_buffer *dbg_buff,
609 			      struct cudbg_error *cudbg_err)
610 {
611 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
612 }
613 
614 int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
615 			      struct cudbg_buffer *dbg_buff,
616 			      struct cudbg_error *cudbg_err)
617 {
618 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
619 }
620 
621 int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
622 			      struct cudbg_buffer *dbg_buff,
623 			      struct cudbg_error *cudbg_err)
624 {
625 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
626 }
627 
628 int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
629 			       struct cudbg_buffer *dbg_buff,
630 			       struct cudbg_error *cudbg_err)
631 {
632 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
633 }
634 
635 int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
636 			       struct cudbg_buffer *dbg_buff,
637 			       struct cudbg_error *cudbg_err)
638 {
639 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
640 }
641 
642 int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
643 			       struct cudbg_buffer *dbg_buff,
644 			       struct cudbg_error *cudbg_err)
645 {
646 	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
647 }
648 
649 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
650 {
651 	u32 value;
652 
653 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
654 		     QUENUMSELECT_V(qid));
655 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
656 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
657 	return value * sizeof(u32);
658 }
659 
660 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
661 			      struct cudbg_buffer *dbg_buff,
662 			      struct cudbg_error *cudbg_err, int qid)
663 {
664 	struct adapter *padap = pdbg_init->adap;
665 	struct cudbg_buffer temp_buff = { 0 };
666 	int no_of_read_words, rc = 0;
667 	u32 qsize;
668 
669 	/* collect CIM OBQ */
670 	qsize =  cudbg_cim_obq_size(padap, qid);
671 	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
672 	if (rc)
673 		return rc;
674 
675 	/* t4_read_cim_obq will return no. of read words or error */
676 	no_of_read_words = t4_read_cim_obq(padap, qid,
677 					   (u32 *)temp_buff.data, qsize);
678 	/* no_of_read_words is less than or equal to 0 means error */
679 	if (no_of_read_words <= 0) {
680 		if (!no_of_read_words)
681 			rc = CUDBG_SYSTEM_ERROR;
682 		else
683 			rc = no_of_read_words;
684 		cudbg_err->sys_err = rc;
685 		cudbg_put_buff(pdbg_init, &temp_buff);
686 		return rc;
687 	}
688 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
689 }
690 
691 int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
692 			       struct cudbg_buffer *dbg_buff,
693 			       struct cudbg_error *cudbg_err)
694 {
695 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
696 }
697 
698 int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
699 			       struct cudbg_buffer *dbg_buff,
700 			       struct cudbg_error *cudbg_err)
701 {
702 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
703 }
704 
705 int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
706 			       struct cudbg_buffer *dbg_buff,
707 			       struct cudbg_error *cudbg_err)
708 {
709 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
710 }
711 
712 int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
713 			       struct cudbg_buffer *dbg_buff,
714 			       struct cudbg_error *cudbg_err)
715 {
716 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
717 }
718 
719 int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
720 			      struct cudbg_buffer *dbg_buff,
721 			      struct cudbg_error *cudbg_err)
722 {
723 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
724 }
725 
726 int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
727 			       struct cudbg_buffer *dbg_buff,
728 			       struct cudbg_error *cudbg_err)
729 {
730 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
731 }
732 
733 int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
734 				struct cudbg_buffer *dbg_buff,
735 				struct cudbg_error *cudbg_err)
736 {
737 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
738 }
739 
740 int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
741 				struct cudbg_buffer *dbg_buff,
742 				struct cudbg_error *cudbg_err)
743 {
744 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
745 }
746 
747 static int cudbg_meminfo_get_mem_index(struct adapter *padap,
748 				       struct cudbg_meminfo *mem_info,
749 				       u8 mem_type, u8 *idx)
750 {
751 	u8 i, flag;
752 
753 	switch (mem_type) {
754 	case MEM_EDC0:
755 		flag = EDC0_FLAG;
756 		break;
757 	case MEM_EDC1:
758 		flag = EDC1_FLAG;
759 		break;
760 	case MEM_MC0:
761 		/* Some T5 cards have both MC0 and MC1. */
762 		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
763 		break;
764 	case MEM_MC1:
765 		flag = MC1_FLAG;
766 		break;
767 	case MEM_HMA:
768 		flag = HMA_FLAG;
769 		break;
770 	default:
771 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
772 	}
773 
774 	for (i = 0; i < mem_info->avail_c; i++) {
775 		if (mem_info->avail[i].idx == flag) {
776 			*idx = i;
777 			return 0;
778 		}
779 	}
780 
781 	return CUDBG_STATUS_ENTITY_NOT_FOUND;
782 }
783 
784 /* Fetch the @region_name's start and end from @meminfo. */
785 static int cudbg_get_mem_region(struct adapter *padap,
786 				struct cudbg_meminfo *meminfo,
787 				u8 mem_type, const char *region_name,
788 				struct cudbg_mem_desc *mem_desc)
789 {
790 	u8 mc, found = 0;
791 	u32 i, idx = 0;
792 	int rc;
793 
794 	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
795 	if (rc)
796 		return rc;
797 
798 	for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
799 		if (!strcmp(cudbg_region[i], region_name)) {
800 			found = 1;
801 			idx = i;
802 			break;
803 		}
804 	}
805 	if (!found)
806 		return -EINVAL;
807 
808 	found = 0;
809 	for (i = 0; i < meminfo->mem_c; i++) {
810 		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
811 			continue; /* Skip holes */
812 
813 		if (!(meminfo->mem[i].limit))
814 			meminfo->mem[i].limit =
815 				i < meminfo->mem_c - 1 ?
816 				meminfo->mem[i + 1].base - 1 : ~0;
817 
818 		if (meminfo->mem[i].idx == idx) {
819 			/* Check if the region exists in @mem_type memory */
820 			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
821 			    meminfo->mem[i].limit < meminfo->avail[mc].base)
822 				return -EINVAL;
823 
824 			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
825 				return -EINVAL;
826 
827 			memcpy(mem_desc, &meminfo->mem[i],
828 			       sizeof(struct cudbg_mem_desc));
829 			found = 1;
830 			break;
831 		}
832 	}
833 	if (!found)
834 		return -EINVAL;
835 
836 	return 0;
837 }
838 
839 /* Fetch and update the start and end of the requested memory region w.r.t 0
840  * in the corresponding EDC/MC/HMA.
841  */
842 static int cudbg_get_mem_relative(struct adapter *padap,
843 				  struct cudbg_meminfo *meminfo,
844 				  u8 mem_type, u32 *out_base, u32 *out_end)
845 {
846 	u8 mc_idx;
847 	int rc;
848 
849 	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
850 	if (rc)
851 		return rc;
852 
853 	if (*out_base < meminfo->avail[mc_idx].base)
854 		*out_base = 0;
855 	else
856 		*out_base -= meminfo->avail[mc_idx].base;
857 
858 	if (*out_end > meminfo->avail[mc_idx].limit)
859 		*out_end = meminfo->avail[mc_idx].limit;
860 	else
861 		*out_end -= meminfo->avail[mc_idx].base;
862 
863 	return 0;
864 }
865 
866 /* Get TX and RX Payload region */
867 static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
868 				   const char *region_name,
869 				   struct cudbg_region_info *payload)
870 {
871 	struct cudbg_mem_desc mem_desc = { 0 };
872 	struct cudbg_meminfo meminfo;
873 	int rc;
874 
875 	rc = cudbg_fill_meminfo(padap, &meminfo);
876 	if (rc)
877 		return rc;
878 
879 	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
880 				  &mem_desc);
881 	if (rc) {
882 		payload->exist = false;
883 		return 0;
884 	}
885 
886 	payload->exist = true;
887 	payload->start = mem_desc.base;
888 	payload->end = mem_desc.limit;
889 
890 	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
891 				      &payload->start, &payload->end);
892 }
893 
894 static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
895 			     int mtype, u32 addr, u32 len, void *hbuf)
896 {
897 	u32 win_pf, memoffset, mem_aperture, mem_base;
898 	struct adapter *adap = pdbg_init->adap;
899 	u32 pos, offset, resid;
900 	u32 *res_buf;
901 	u64 *buf;
902 	int ret;
903 
904 	/* Argument sanity checks ...
905 	 */
906 	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
907 		return -EINVAL;
908 
909 	buf = (u64 *)hbuf;
910 
911 	/* Try to do 64-bit reads.  Residual will be handled later. */
912 	resid = len & 0x7;
913 	len -= resid;
914 
915 	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
916 				&mem_aperture);
917 	if (ret)
918 		return ret;
919 
920 	addr = addr + memoffset;
921 	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
922 
923 	pos = addr & ~(mem_aperture - 1);
924 	offset = addr - pos;
925 
926 	/* Set up initial PCI-E Memory Window to cover the start of our
927 	 * transfer.
928 	 */
929 	t4_memory_update_win(adap, win, pos | win_pf);
930 
931 	/* Transfer data from the adapter */
932 	while (len > 0) {
933 		*buf++ = le64_to_cpu((__force __le64)
934 				     t4_read_reg64(adap, mem_base + offset));
935 		offset += sizeof(u64);
936 		len -= sizeof(u64);
937 
938 		/* If we've reached the end of our current window aperture,
939 		 * move the PCI-E Memory Window on to the next.
940 		 */
941 		if (offset == mem_aperture) {
942 			pos += mem_aperture;
943 			offset = 0;
944 			t4_memory_update_win(adap, win, pos | win_pf);
945 		}
946 	}
947 
948 	res_buf = (u32 *)buf;
949 	/* Read residual in 32-bit multiples */
950 	while (resid > sizeof(u32)) {
951 		*res_buf++ = le32_to_cpu((__force __le32)
952 					 t4_read_reg(adap, mem_base + offset));
953 		offset += sizeof(u32);
954 		resid -= sizeof(u32);
955 
956 		/* If we've reached the end of our current window aperture,
957 		 * move the PCI-E Memory Window on to the next.
958 		 */
959 		if (offset == mem_aperture) {
960 			pos += mem_aperture;
961 			offset = 0;
962 			t4_memory_update_win(adap, win, pos | win_pf);
963 		}
964 	}
965 
966 	/* Transfer residual < 32-bits */
967 	if (resid)
968 		t4_memory_rw_residual(adap, resid, mem_base + offset,
969 				      (u8 *)res_buf, T4_MEMORY_READ);
970 
971 	return 0;
972 }
973 
974 #define CUDBG_YIELD_ITERATION 256
975 
976 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
977 			     struct cudbg_buffer *dbg_buff, u8 mem_type,
978 			     unsigned long tot_len,
979 			     struct cudbg_error *cudbg_err)
980 {
981 	static const char * const region_name[] = { "Tx payload:",
982 						    "Rx payload:" };
983 	unsigned long bytes, bytes_left, bytes_read = 0;
984 	struct adapter *padap = pdbg_init->adap;
985 	struct cudbg_buffer temp_buff = { 0 };
986 	struct cudbg_region_info payload[2];
987 	u32 yield_count = 0;
988 	int rc = 0;
989 	u8 i;
990 
991 	/* Get TX/RX Payload region range if they exist */
992 	memset(payload, 0, sizeof(payload));
993 	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
994 		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
995 					     &payload[i]);
996 		if (rc)
997 			return rc;
998 
999 		if (payload[i].exist) {
1000 			/* Align start and end to avoid wrap around */
1001 			payload[i].start = roundup(payload[i].start,
1002 						   CUDBG_CHUNK_SIZE);
1003 			payload[i].end = rounddown(payload[i].end,
1004 						   CUDBG_CHUNK_SIZE);
1005 		}
1006 	}
1007 
1008 	bytes_left = tot_len;
1009 	while (bytes_left > 0) {
1010 		/* As MC size is huge and read through PIO access, this
1011 		 * loop will hold cpu for a longer time. OS may think that
1012 		 * the process is hanged and will generate CPU stall traces.
1013 		 * So yield the cpu regularly.
1014 		 */
1015 		yield_count++;
1016 		if (!(yield_count % CUDBG_YIELD_ITERATION))
1017 			schedule();
1018 
1019 		bytes = min_t(unsigned long, bytes_left,
1020 			      (unsigned long)CUDBG_CHUNK_SIZE);
1021 		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
1022 		if (rc)
1023 			return rc;
1024 
1025 		for (i = 0; i < ARRAY_SIZE(payload); i++)
1026 			if (payload[i].exist &&
1027 			    bytes_read >= payload[i].start &&
1028 			    bytes_read + bytes <= payload[i].end)
1029 				/* TX and RX Payload regions can't overlap */
1030 				goto skip_read;
1031 
1032 		spin_lock(&padap->win0_lock);
1033 		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
1034 				       bytes_read, bytes, temp_buff.data);
1035 		spin_unlock(&padap->win0_lock);
1036 		if (rc) {
1037 			cudbg_err->sys_err = rc;
1038 			cudbg_put_buff(pdbg_init, &temp_buff);
1039 			return rc;
1040 		}
1041 
1042 skip_read:
1043 		bytes_left -= bytes;
1044 		bytes_read += bytes;
1045 		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
1046 						  dbg_buff);
1047 		if (rc) {
1048 			cudbg_put_buff(pdbg_init, &temp_buff);
1049 			return rc;
1050 		}
1051 	}
1052 	return rc;
1053 }
1054 
1055 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
1056 			     struct cudbg_error *cudbg_err)
1057 {
1058 	struct adapter *padap = pdbg_init->adap;
1059 	int rc;
1060 
1061 	if (is_fw_attached(pdbg_init)) {
1062 		/* Flush uP dcache before reading edcX/mcX  */
1063 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
1064 		if (rc)
1065 			cudbg_err->sys_warn = rc;
1066 	}
1067 }
1068 
1069 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
1070 				    struct cudbg_buffer *dbg_buff,
1071 				    struct cudbg_error *cudbg_err,
1072 				    u8 mem_type)
1073 {
1074 	struct adapter *padap = pdbg_init->adap;
1075 	struct cudbg_meminfo mem_info;
1076 	unsigned long size;
1077 	u8 mc_idx;
1078 	int rc;
1079 
1080 	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
1081 	rc = cudbg_fill_meminfo(padap, &mem_info);
1082 	if (rc)
1083 		return rc;
1084 
1085 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
1086 	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
1087 	if (rc)
1088 		return rc;
1089 
1090 	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
1091 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
1092 				 cudbg_err);
1093 }
1094 
1095 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
1096 			       struct cudbg_buffer *dbg_buff,
1097 			       struct cudbg_error *cudbg_err)
1098 {
1099 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1100 					MEM_EDC0);
1101 }
1102 
1103 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
1104 			       struct cudbg_buffer *dbg_buff,
1105 			       struct cudbg_error *cudbg_err)
1106 {
1107 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1108 					MEM_EDC1);
1109 }
1110 
1111 int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
1112 			      struct cudbg_buffer *dbg_buff,
1113 			      struct cudbg_error *cudbg_err)
1114 {
1115 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1116 					MEM_MC0);
1117 }
1118 
1119 int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
1120 			      struct cudbg_buffer *dbg_buff,
1121 			      struct cudbg_error *cudbg_err)
1122 {
1123 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1124 					MEM_MC1);
1125 }
1126 
1127 int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
1128 			      struct cudbg_buffer *dbg_buff,
1129 			      struct cudbg_error *cudbg_err)
1130 {
1131 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1132 					MEM_HMA);
1133 }
1134 
1135 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
1136 		      struct cudbg_buffer *dbg_buff,
1137 		      struct cudbg_error *cudbg_err)
1138 {
1139 	struct adapter *padap = pdbg_init->adap;
1140 	struct cudbg_buffer temp_buff = { 0 };
1141 	int rc, nentries;
1142 
1143 	nentries = t4_chip_rss_size(padap);
1144 	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
1145 			    &temp_buff);
1146 	if (rc)
1147 		return rc;
1148 
1149 	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
1150 	if (rc) {
1151 		cudbg_err->sys_err = rc;
1152 		cudbg_put_buff(pdbg_init, &temp_buff);
1153 		return rc;
1154 	}
1155 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1156 }
1157 
1158 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
1159 				struct cudbg_buffer *dbg_buff,
1160 				struct cudbg_error *cudbg_err)
1161 {
1162 	struct adapter *padap = pdbg_init->adap;
1163 	struct cudbg_buffer temp_buff = { 0 };
1164 	struct cudbg_rss_vf_conf *vfconf;
1165 	int vf, rc, vf_count;
1166 
1167 	vf_count = padap->params.arch.vfcount;
1168 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
1169 			    vf_count * sizeof(struct cudbg_rss_vf_conf),
1170 			    &temp_buff);
1171 	if (rc)
1172 		return rc;
1173 
1174 	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
1175 	for (vf = 0; vf < vf_count; vf++)
1176 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1177 				      &vfconf[vf].rss_vf_vfh, true);
1178 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1179 }
1180 
1181 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
1182 			   struct cudbg_buffer *dbg_buff,
1183 			   struct cudbg_error *cudbg_err)
1184 {
1185 	struct adapter *padap = pdbg_init->adap;
1186 	struct cudbg_buffer temp_buff = { 0 };
1187 	int rc;
1188 
1189 	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
1190 			    &temp_buff);
1191 	if (rc)
1192 		return rc;
1193 
1194 	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
1195 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1196 }
1197 
1198 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
1199 			   struct cudbg_buffer *dbg_buff,
1200 			   struct cudbg_error *cudbg_err)
1201 {
1202 	struct adapter *padap = pdbg_init->adap;
1203 	struct cudbg_buffer temp_buff = { 0 };
1204 	struct cudbg_pm_stats *pm_stats_buff;
1205 	int rc;
1206 
1207 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
1208 			    &temp_buff);
1209 	if (rc)
1210 		return rc;
1211 
1212 	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
1213 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1214 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1215 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1216 }
1217 
1218 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
1219 			   struct cudbg_buffer *dbg_buff,
1220 			   struct cudbg_error *cudbg_err)
1221 {
1222 	struct adapter *padap = pdbg_init->adap;
1223 	struct cudbg_buffer temp_buff = { 0 };
1224 	struct cudbg_hw_sched *hw_sched_buff;
1225 	int i, rc = 0;
1226 
1227 	if (!padap->params.vpd.cclk)
1228 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
1229 
1230 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
1231 			    &temp_buff);
1232 	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
1233 	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
1234 	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
1235 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1236 	for (i = 0; i < NTX_SCHED; ++i)
1237 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1238 				&hw_sched_buff->ipg[i], true);
1239 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1240 }
1241 
1242 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
1243 			      struct cudbg_buffer *dbg_buff,
1244 			      struct cudbg_error *cudbg_err)
1245 {
1246 	struct adapter *padap = pdbg_init->adap;
1247 	struct cudbg_buffer temp_buff = { 0 };
1248 	struct ireg_buf *ch_tp_pio;
1249 	int i, rc, n = 0;
1250 	u32 size;
1251 
1252 	if (is_t5(padap->params.chip))
1253 		n = sizeof(t5_tp_pio_array) +
1254 		    sizeof(t5_tp_tm_pio_array) +
1255 		    sizeof(t5_tp_mib_index_array);
1256 	else
1257 		n = sizeof(t6_tp_pio_array) +
1258 		    sizeof(t6_tp_tm_pio_array) +
1259 		    sizeof(t6_tp_mib_index_array);
1260 
1261 	n = n / (IREG_NUM_ELEM * sizeof(u32));
1262 	size = sizeof(struct ireg_buf) * n;
1263 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1264 	if (rc)
1265 		return rc;
1266 
1267 	ch_tp_pio = (struct ireg_buf *)temp_buff.data;
1268 
1269 	/* TP_PIO */
1270 	if (is_t5(padap->params.chip))
1271 		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1272 	else if (is_t6(padap->params.chip))
1273 		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1274 
1275 	for (i = 0; i < n; i++) {
1276 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1277 		u32 *buff = ch_tp_pio->outbuf;
1278 
1279 		if (is_t5(padap->params.chip)) {
1280 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
1281 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
1282 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
1283 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
1284 		} else if (is_t6(padap->params.chip)) {
1285 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
1286 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
1287 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
1288 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
1289 		}
1290 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
1291 			       tp_pio->ireg_local_offset, true);
1292 		ch_tp_pio++;
1293 	}
1294 
1295 	/* TP_TM_PIO */
1296 	if (is_t5(padap->params.chip))
1297 		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1298 	else if (is_t6(padap->params.chip))
1299 		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1300 
1301 	for (i = 0; i < n; i++) {
1302 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1303 		u32 *buff = ch_tp_pio->outbuf;
1304 
1305 		if (is_t5(padap->params.chip)) {
1306 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
1307 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
1308 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
1309 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
1310 		} else if (is_t6(padap->params.chip)) {
1311 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
1312 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
1313 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
1314 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
1315 		}
1316 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
1317 				  tp_pio->ireg_local_offset, true);
1318 		ch_tp_pio++;
1319 	}
1320 
1321 	/* TP_MIB_INDEX */
1322 	if (is_t5(padap->params.chip))
1323 		n = sizeof(t5_tp_mib_index_array) /
1324 		    (IREG_NUM_ELEM * sizeof(u32));
1325 	else if (is_t6(padap->params.chip))
1326 		n = sizeof(t6_tp_mib_index_array) /
1327 		    (IREG_NUM_ELEM * sizeof(u32));
1328 
1329 	for (i = 0; i < n ; i++) {
1330 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1331 		u32 *buff = ch_tp_pio->outbuf;
1332 
1333 		if (is_t5(padap->params.chip)) {
1334 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
1335 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
1336 			tp_pio->ireg_local_offset =
1337 				t5_tp_mib_index_array[i][2];
1338 			tp_pio->ireg_offset_range =
1339 				t5_tp_mib_index_array[i][3];
1340 		} else if (is_t6(padap->params.chip)) {
1341 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
1342 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
1343 			tp_pio->ireg_local_offset =
1344 				t6_tp_mib_index_array[i][2];
1345 			tp_pio->ireg_offset_range =
1346 				t6_tp_mib_index_array[i][3];
1347 		}
1348 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
1349 			       tp_pio->ireg_local_offset, true);
1350 		ch_tp_pio++;
1351 	}
1352 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1353 }
1354 
1355 static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
1356 					      struct sge_qbase_reg_field *qbase,
1357 					      u32 func, bool is_pf)
1358 {
1359 	u32 *buff, i;
1360 
1361 	if (is_pf) {
1362 		buff = qbase->pf_data_value[func];
1363 	} else {
1364 		buff = qbase->vf_data_value[func];
1365 		/* In SGE_QBASE_INDEX,
1366 		 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
1367 		 */
1368 		func += 8;
1369 	}
1370 
1371 	t4_write_reg(padap, qbase->reg_addr, func);
1372 	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
1373 		*buff = t4_read_reg(padap, qbase->reg_data[i]);
1374 }
1375 
1376 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
1377 			       struct cudbg_buffer *dbg_buff,
1378 			       struct cudbg_error *cudbg_err)
1379 {
1380 	struct adapter *padap = pdbg_init->adap;
1381 	struct cudbg_buffer temp_buff = { 0 };
1382 	struct sge_qbase_reg_field *sge_qbase;
1383 	struct ireg_buf *ch_sge_dbg;
1384 	int i, rc;
1385 
1386 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
1387 			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
1388 			    &temp_buff);
1389 	if (rc)
1390 		return rc;
1391 
1392 	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
1393 	for (i = 0; i < 2; i++) {
1394 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
1395 		u32 *buff = ch_sge_dbg->outbuf;
1396 
1397 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
1398 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
1399 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
1400 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
1401 		t4_read_indirect(padap,
1402 				 sge_pio->ireg_addr,
1403 				 sge_pio->ireg_data,
1404 				 buff,
1405 				 sge_pio->ireg_offset_range,
1406 				 sge_pio->ireg_local_offset);
1407 		ch_sge_dbg++;
1408 	}
1409 
1410 	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1411 		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
1412 		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
1413 		 * SGE_QBASE_MAP[0-3]
1414 		 */
1415 		sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
1416 		for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
1417 			sge_qbase->reg_data[i] =
1418 				t6_sge_qbase_index_array[i + 1];
1419 
1420 		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
1421 			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1422 							  i, true);
1423 
1424 		for (i = 0; i < padap->params.arch.vfcount; i++)
1425 			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1426 							  i, false);
1427 
1428 		sge_qbase->vfcount = padap->params.arch.vfcount;
1429 	}
1430 
1431 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1432 }
1433 
1434 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
1435 			   struct cudbg_buffer *dbg_buff,
1436 			   struct cudbg_error *cudbg_err)
1437 {
1438 	struct adapter *padap = pdbg_init->adap;
1439 	struct cudbg_buffer temp_buff = { 0 };
1440 	struct cudbg_ulprx_la *ulprx_la_buff;
1441 	int rc;
1442 
1443 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
1444 			    &temp_buff);
1445 	if (rc)
1446 		return rc;
1447 
1448 	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
1449 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
1450 	ulprx_la_buff->size = ULPRX_LA_SIZE;
1451 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1452 }
1453 
1454 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
1455 			struct cudbg_buffer *dbg_buff,
1456 			struct cudbg_error *cudbg_err)
1457 {
1458 	struct adapter *padap = pdbg_init->adap;
1459 	struct cudbg_buffer temp_buff = { 0 };
1460 	struct cudbg_tp_la *tp_la_buff;
1461 	int size, rc;
1462 
1463 	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
1464 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1465 	if (rc)
1466 		return rc;
1467 
1468 	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
1469 	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
1470 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1471 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1472 }
1473 
1474 int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
1475 			  struct cudbg_buffer *dbg_buff,
1476 			  struct cudbg_error *cudbg_err)
1477 {
1478 	struct adapter *padap = pdbg_init->adap;
1479 	struct cudbg_buffer temp_buff = { 0 };
1480 	struct cudbg_meminfo *meminfo_buff;
1481 	struct cudbg_ver_hdr *ver_hdr;
1482 	int rc;
1483 
1484 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
1485 			    sizeof(struct cudbg_ver_hdr) +
1486 			    sizeof(struct cudbg_meminfo),
1487 			    &temp_buff);
1488 	if (rc)
1489 		return rc;
1490 
1491 	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
1492 	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
1493 	ver_hdr->revision = CUDBG_MEMINFO_REV;
1494 	ver_hdr->size = sizeof(struct cudbg_meminfo);
1495 
1496 	meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
1497 						sizeof(*ver_hdr));
1498 	rc = cudbg_fill_meminfo(padap, meminfo_buff);
1499 	if (rc) {
1500 		cudbg_err->sys_err = rc;
1501 		cudbg_put_buff(pdbg_init, &temp_buff);
1502 		return rc;
1503 	}
1504 
1505 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1506 }
1507 
1508 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
1509 			     struct cudbg_buffer *dbg_buff,
1510 			     struct cudbg_error *cudbg_err)
1511 {
1512 	struct cudbg_cim_pif_la *cim_pif_la_buff;
1513 	struct adapter *padap = pdbg_init->adap;
1514 	struct cudbg_buffer temp_buff = { 0 };
1515 	int size, rc;
1516 
1517 	size = sizeof(struct cudbg_cim_pif_la) +
1518 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1519 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1520 	if (rc)
1521 		return rc;
1522 
1523 	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
1524 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1525 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1526 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1527 			   NULL, NULL);
1528 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1529 }
1530 
1531 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
1532 			   struct cudbg_buffer *dbg_buff,
1533 			   struct cudbg_error *cudbg_err)
1534 {
1535 	struct adapter *padap = pdbg_init->adap;
1536 	struct cudbg_buffer temp_buff = { 0 };
1537 	struct cudbg_clk_info *clk_info_buff;
1538 	u64 tp_tick_us;
1539 	int rc;
1540 
1541 	if (!padap->params.vpd.cclk)
1542 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
1543 
1544 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
1545 			    &temp_buff);
1546 	if (rc)
1547 		return rc;
1548 
1549 	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
1550 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
1551 	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
1552 	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
1553 	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
1554 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1555 
1556 	clk_info_buff->dack_timer =
1557 		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
1558 		t4_read_reg(padap, TP_DACK_TIMER_A);
1559 	clk_info_buff->retransmit_min =
1560 		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
1561 	clk_info_buff->retransmit_max =
1562 		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
1563 	clk_info_buff->persist_timer_min =
1564 		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
1565 	clk_info_buff->persist_timer_max =
1566 		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
1567 	clk_info_buff->keepalive_idle_timer =
1568 		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
1569 	clk_info_buff->keepalive_interval =
1570 		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
1571 	clk_info_buff->initial_srtt =
1572 		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
1573 	clk_info_buff->finwait2_timer =
1574 		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
1575 
1576 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1577 }
1578 
1579 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
1580 				struct cudbg_buffer *dbg_buff,
1581 				struct cudbg_error *cudbg_err)
1582 {
1583 	struct adapter *padap = pdbg_init->adap;
1584 	struct cudbg_buffer temp_buff = { 0 };
1585 	struct ireg_buf *ch_pcie;
1586 	int i, rc, n;
1587 	u32 size;
1588 
1589 	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1590 	size = sizeof(struct ireg_buf) * n * 2;
1591 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1592 	if (rc)
1593 		return rc;
1594 
1595 	ch_pcie = (struct ireg_buf *)temp_buff.data;
1596 	/* PCIE_PDBG */
1597 	for (i = 0; i < n; i++) {
1598 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1599 		u32 *buff = ch_pcie->outbuf;
1600 
1601 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
1602 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
1603 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
1604 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
1605 		t4_read_indirect(padap,
1606 				 pcie_pio->ireg_addr,
1607 				 pcie_pio->ireg_data,
1608 				 buff,
1609 				 pcie_pio->ireg_offset_range,
1610 				 pcie_pio->ireg_local_offset);
1611 		ch_pcie++;
1612 	}
1613 
1614 	/* PCIE_CDBG */
1615 	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1616 	for (i = 0; i < n; i++) {
1617 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1618 		u32 *buff = ch_pcie->outbuf;
1619 
1620 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
1621 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
1622 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
1623 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
1624 		t4_read_indirect(padap,
1625 				 pcie_pio->ireg_addr,
1626 				 pcie_pio->ireg_data,
1627 				 buff,
1628 				 pcie_pio->ireg_offset_range,
1629 				 pcie_pio->ireg_local_offset);
1630 		ch_pcie++;
1631 	}
1632 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1633 }
1634 
1635 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
1636 			      struct cudbg_buffer *dbg_buff,
1637 			      struct cudbg_error *cudbg_err)
1638 {
1639 	struct adapter *padap = pdbg_init->adap;
1640 	struct cudbg_buffer temp_buff = { 0 };
1641 	struct ireg_buf *ch_pm;
1642 	int i, rc, n;
1643 	u32 size;
1644 
1645 	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
1646 	size = sizeof(struct ireg_buf) * n * 2;
1647 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1648 	if (rc)
1649 		return rc;
1650 
1651 	ch_pm = (struct ireg_buf *)temp_buff.data;
1652 	/* PM_RX */
1653 	for (i = 0; i < n; i++) {
1654 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
1655 		u32 *buff = ch_pm->outbuf;
1656 
1657 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
1658 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
1659 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
1660 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
1661 		t4_read_indirect(padap,
1662 				 pm_pio->ireg_addr,
1663 				 pm_pio->ireg_data,
1664 				 buff,
1665 				 pm_pio->ireg_offset_range,
1666 				 pm_pio->ireg_local_offset);
1667 		ch_pm++;
1668 	}
1669 
1670 	/* PM_TX */
1671 	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
1672 	for (i = 0; i < n; i++) {
1673 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
1674 		u32 *buff = ch_pm->outbuf;
1675 
1676 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1677 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
1678 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1679 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1680 		t4_read_indirect(padap,
1681 				 pm_pio->ireg_addr,
1682 				 pm_pio->ireg_data,
1683 				 buff,
1684 				 pm_pio->ireg_offset_range,
1685 				 pm_pio->ireg_local_offset);
1686 		ch_pm++;
1687 	}
1688 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1689 }
1690 
1691 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
1692 		      struct cudbg_buffer *dbg_buff,
1693 		      struct cudbg_error *cudbg_err)
1694 {
1695 	struct adapter *padap = pdbg_init->adap;
1696 	struct cudbg_tid_info_region_rev1 *tid1;
1697 	struct cudbg_buffer temp_buff = { 0 };
1698 	struct cudbg_tid_info_region *tid;
1699 	u32 para[2], val[2];
1700 	int rc;
1701 
1702 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
1703 			    sizeof(struct cudbg_tid_info_region_rev1),
1704 			    &temp_buff);
1705 	if (rc)
1706 		return rc;
1707 
1708 	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
1709 	tid = &tid1->tid;
1710 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1711 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
1712 	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
1713 			     sizeof(struct cudbg_ver_hdr);
1714 
1715 	/* If firmware is not attached/alive, use backdoor register
1716 	 * access to collect dump.
1717 	 */
1718 	if (!is_fw_attached(pdbg_init))
1719 		goto fill_tid;
1720 
1721 #define FW_PARAM_PFVF_A(param) \
1722 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1723 	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1724 	 FW_PARAMS_PARAM_Y_V(0) | \
1725 	 FW_PARAMS_PARAM_Z_V(0))
1726 
1727 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
1728 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
1729 	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
1730 	if (rc <  0) {
1731 		cudbg_err->sys_err = rc;
1732 		cudbg_put_buff(pdbg_init, &temp_buff);
1733 		return rc;
1734 	}
1735 	tid->uotid_base = val[0];
1736 	tid->nuotids = val[1] - val[0] + 1;
1737 
1738 	if (is_t5(padap->params.chip)) {
1739 		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
1740 	} else if (is_t6(padap->params.chip)) {
1741 		tid1->tid_start =
1742 			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
1743 		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
1744 
1745 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
1746 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
1747 		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
1748 				     para, val);
1749 		if (rc < 0) {
1750 			cudbg_err->sys_err = rc;
1751 			cudbg_put_buff(pdbg_init, &temp_buff);
1752 			return rc;
1753 		}
1754 		tid->hpftid_base = val[0];
1755 		tid->nhpftids = val[1] - val[0] + 1;
1756 	}
1757 
1758 #undef FW_PARAM_PFVF_A
1759 
1760 fill_tid:
1761 	tid->ntids = padap->tids.ntids;
1762 	tid->nstids = padap->tids.nstids;
1763 	tid->stid_base = padap->tids.stid_base;
1764 	tid->hash_base = padap->tids.hash_base;
1765 
1766 	tid->natids = padap->tids.natids;
1767 	tid->nftids = padap->tids.nftids;
1768 	tid->ftid_base = padap->tids.ftid_base;
1769 	tid->aftid_base = padap->tids.aftid_base;
1770 	tid->aftid_end = padap->tids.aftid_end;
1771 
1772 	tid->sftid_base = padap->tids.sftid_base;
1773 	tid->nsftids = padap->tids.nsftids;
1774 
1775 	tid->flags = padap->flags;
1776 	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
1777 	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
1778 	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
1779 
1780 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1781 }
1782 
1783 int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
1784 			      struct cudbg_buffer *dbg_buff,
1785 			      struct cudbg_error *cudbg_err)
1786 {
1787 	struct adapter *padap = pdbg_init->adap;
1788 	struct cudbg_buffer temp_buff = { 0 };
1789 	u32 size, *value, j;
1790 	int i, rc, n;
1791 
1792 	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
1793 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
1794 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1795 	if (rc)
1796 		return rc;
1797 
1798 	value = (u32 *)temp_buff.data;
1799 	for (i = 0; i < n; i++) {
1800 		for (j = t5_pcie_config_array[i][0];
1801 		     j <= t5_pcie_config_array[i][1]; j += 4) {
1802 			t4_hw_pci_read_cfg4(padap, j, value);
1803 			value++;
1804 		}
1805 	}
1806 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1807 }
1808 
1809 static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
1810 {
1811 	int index, bit, bit_pos = 0;
1812 
1813 	switch (type) {
1814 	case CTXT_EGRESS:
1815 		bit_pos = 176;
1816 		break;
1817 	case CTXT_INGRESS:
1818 		bit_pos = 141;
1819 		break;
1820 	case CTXT_FLM:
1821 		bit_pos = 89;
1822 		break;
1823 	}
1824 	index = bit_pos / 32;
1825 	bit =  bit_pos % 32;
1826 	return buf[index] & (1U << bit);
1827 }
1828 
1829 static int cudbg_get_ctxt_region_info(struct adapter *padap,
1830 				      struct cudbg_region_info *ctx_info,
1831 				      u8 *mem_type)
1832 {
1833 	struct cudbg_mem_desc mem_desc;
1834 	struct cudbg_meminfo meminfo;
1835 	u32 i, j, value, found;
1836 	u8 flq;
1837 	int rc;
1838 
1839 	rc = cudbg_fill_meminfo(padap, &meminfo);
1840 	if (rc)
1841 		return rc;
1842 
1843 	/* Get EGRESS and INGRESS context region size */
1844 	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
1845 		found = 0;
1846 		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
1847 		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
1848 			rc = cudbg_get_mem_region(padap, &meminfo, j,
1849 						  cudbg_region[i],
1850 						  &mem_desc);
1851 			if (!rc) {
1852 				found = 1;
1853 				rc = cudbg_get_mem_relative(padap, &meminfo, j,
1854 							    &mem_desc.base,
1855 							    &mem_desc.limit);
1856 				if (rc) {
1857 					ctx_info[i].exist = false;
1858 					break;
1859 				}
1860 				ctx_info[i].exist = true;
1861 				ctx_info[i].start = mem_desc.base;
1862 				ctx_info[i].end = mem_desc.limit;
1863 				mem_type[i] = j;
1864 				break;
1865 			}
1866 		}
1867 		if (!found)
1868 			ctx_info[i].exist = false;
1869 	}
1870 
1871 	/* Get FLM and CNM max qid. */
1872 	value = t4_read_reg(padap, SGE_FLM_CFG_A);
1873 
1874 	/* Get number of data freelist queues */
1875 	flq = HDRSTARTFLQ_G(value);
1876 	ctx_info[CTXT_FLM].exist = true;
1877 	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
1878 
1879 	/* The number of CONM contexts are same as number of freelist
1880 	 * queues.
1881 	 */
1882 	ctx_info[CTXT_CNM].exist = true;
1883 	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
1884 
1885 	return 0;
1886 }
1887 
1888 int cudbg_dump_context_size(struct adapter *padap)
1889 {
1890 	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1891 	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1892 	u32 i, size = 0;
1893 	int rc;
1894 
1895 	/* Get max valid qid for each type of queue */
1896 	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1897 	if (rc)
1898 		return rc;
1899 
1900 	for (i = 0; i < CTXT_CNM; i++) {
1901 		if (!region_info[i].exist) {
1902 			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
1903 				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
1904 					SGE_CTXT_SIZE;
1905 			continue;
1906 		}
1907 
1908 		size += (region_info[i].end - region_info[i].start + 1) /
1909 			SGE_CTXT_SIZE;
1910 	}
1911 	return size * sizeof(struct cudbg_ch_cntxt);
1912 }
1913 
1914 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
1915 				enum ctxt_type ctype, u32 *data)
1916 {
1917 	struct adapter *padap = pdbg_init->adap;
1918 	int rc = -1;
1919 
1920 	/* Under heavy traffic, the SGE Queue contexts registers will be
1921 	 * frequently accessed by firmware.
1922 	 *
1923 	 * To avoid conflicts with firmware, always ask firmware to fetch
1924 	 * the SGE Queue contexts via mailbox. On failure, fallback to
1925 	 * accessing hardware registers directly.
1926 	 */
1927 	if (is_fw_attached(pdbg_init))
1928 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
1929 	if (rc)
1930 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
1931 }
1932 
1933 static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
1934 				  u8 ctxt_type,
1935 				  struct cudbg_ch_cntxt **out_buff)
1936 {
1937 	struct cudbg_ch_cntxt *buff = *out_buff;
1938 	int rc;
1939 	u32 j;
1940 
1941 	for (j = 0; j < max_qid; j++) {
1942 		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
1943 		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
1944 		if (!rc)
1945 			continue;
1946 
1947 		buff->cntxt_type = ctxt_type;
1948 		buff->cntxt_id = j;
1949 		buff++;
1950 		if (ctxt_type == CTXT_FLM) {
1951 			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
1952 			buff->cntxt_type = CTXT_CNM;
1953 			buff->cntxt_id = j;
1954 			buff++;
1955 		}
1956 	}
1957 
1958 	*out_buff = buff;
1959 }
1960 
1961 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
1962 			       struct cudbg_buffer *dbg_buff,
1963 			       struct cudbg_error *cudbg_err)
1964 {
1965 	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1966 	struct adapter *padap = pdbg_init->adap;
1967 	u32 j, size, max_ctx_size, max_ctx_qid;
1968 	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1969 	struct cudbg_buffer temp_buff = { 0 };
1970 	struct cudbg_ch_cntxt *buff;
1971 	u64 *dst_off, *src_off;
1972 	u8 *ctx_buf;
1973 	u8 i, k;
1974 	int rc;
1975 
1976 	/* Get max valid qid for each type of queue */
1977 	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1978 	if (rc)
1979 		return rc;
1980 
1981 	rc = cudbg_dump_context_size(padap);
1982 	if (rc <= 0)
1983 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1984 
1985 	size = rc;
1986 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1987 	if (rc)
1988 		return rc;
1989 
1990 	/* Get buffer with enough space to read the biggest context
1991 	 * region in memory.
1992 	 */
1993 	max_ctx_size = max(region_info[CTXT_EGRESS].end -
1994 			   region_info[CTXT_EGRESS].start + 1,
1995 			   region_info[CTXT_INGRESS].end -
1996 			   region_info[CTXT_INGRESS].start + 1);
1997 
1998 	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
1999 	if (!ctx_buf) {
2000 		cudbg_put_buff(pdbg_init, &temp_buff);
2001 		return -ENOMEM;
2002 	}
2003 
2004 	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
2005 
2006 	/* Collect EGRESS and INGRESS context data.
2007 	 * In case of failures, fallback to collecting via FW or
2008 	 * backdoor access.
2009 	 */
2010 	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
2011 		if (!region_info[i].exist) {
2012 			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2013 			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2014 					      &buff);
2015 			continue;
2016 		}
2017 
2018 		max_ctx_size = region_info[i].end - region_info[i].start + 1;
2019 		max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2020 
2021 		/* If firmware is not attached/alive, use backdoor register
2022 		 * access to collect dump.
2023 		 */
2024 		if (is_fw_attached(pdbg_init)) {
2025 			t4_sge_ctxt_flush(padap, padap->mbox, i);
2026 
2027 			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
2028 					  region_info[i].start, max_ctx_size,
2029 					  (__be32 *)ctx_buf, 1);
2030 		}
2031 
2032 		if (rc || !is_fw_attached(pdbg_init)) {
2033 			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2034 			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2035 					      &buff);
2036 			continue;
2037 		}
2038 
2039 		for (j = 0; j < max_ctx_qid; j++) {
2040 			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
2041 			dst_off = (u64 *)buff->data;
2042 
2043 			/* The data is stored in 64-bit cpu order.  Convert it
2044 			 * to big endian before parsing.
2045 			 */
2046 			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
2047 				dst_off[k] = cpu_to_be64(src_off[k]);
2048 
2049 			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
2050 			if (!rc)
2051 				continue;
2052 
2053 			buff->cntxt_type = i;
2054 			buff->cntxt_id = j;
2055 			buff++;
2056 		}
2057 	}
2058 
2059 	kvfree(ctx_buf);
2060 
2061 	/* Collect FREELIST and CONGESTION MANAGER contexts */
2062 	max_ctx_size = region_info[CTXT_FLM].end -
2063 		       region_info[CTXT_FLM].start + 1;
2064 	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2065 	/* Since FLM and CONM are 1-to-1 mapped, the below function
2066 	 * will fetch both FLM and CONM contexts.
2067 	 */
2068 	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
2069 
2070 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2071 }
2072 
2073 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
2074 {
2075 	*mask = x | y;
2076 	y = (__force u64)cpu_to_be64(y);
2077 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
2078 }
2079 
2080 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
2081 				   struct fw_ldst_mps_rplc *mps_rplc)
2082 {
2083 	if (is_t5(padap->params.chip)) {
2084 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2085 							  MPS_VF_RPLCT_MAP3_A));
2086 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2087 							  MPS_VF_RPLCT_MAP2_A));
2088 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2089 							  MPS_VF_RPLCT_MAP1_A));
2090 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2091 							  MPS_VF_RPLCT_MAP0_A));
2092 	} else {
2093 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2094 							  MPS_VF_RPLCT_MAP7_A));
2095 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2096 							  MPS_VF_RPLCT_MAP6_A));
2097 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2098 							  MPS_VF_RPLCT_MAP5_A));
2099 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2100 							  MPS_VF_RPLCT_MAP4_A));
2101 	}
2102 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
2103 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
2104 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
2105 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
2106 }
2107 
2108 static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
2109 				    struct cudbg_mps_tcam *tcam, u32 idx)
2110 {
2111 	struct adapter *padap = pdbg_init->adap;
2112 	u64 tcamy, tcamx, val;
2113 	u32 ctl, data2;
2114 	int rc = 0;
2115 
2116 	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
2117 		/* CtlReqID   - 1: use Host Driver Requester ID
2118 		 * CtlCmdType - 0: Read, 1: Write
2119 		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
2120 		 * CtlXYBitSel- 0: Y bit, 1: X bit
2121 		 */
2122 
2123 		/* Read tcamy */
2124 		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
2125 		if (idx < 256)
2126 			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
2127 		else
2128 			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
2129 
2130 		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2131 		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2132 		tcamy = DMACH_G(val) << 32;
2133 		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2134 		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2135 		tcam->lookup_type = DATALKPTYPE_G(data2);
2136 
2137 		/* 0 - Outer header, 1 - Inner header
2138 		 * [71:48] bit locations are overloaded for
2139 		 * outer vs. inner lookup types.
2140 		 */
2141 		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2142 			/* Inner header VNI */
2143 			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2144 			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
2145 			tcam->dip_hit = data2 & DATADIPHIT_F;
2146 		} else {
2147 			tcam->vlan_vld = data2 & DATAVIDH2_F;
2148 			tcam->ivlan = VIDL_G(val);
2149 		}
2150 
2151 		tcam->port_num = DATAPORTNUM_G(data2);
2152 
2153 		/* Read tcamx. Change the control param */
2154 		ctl |= CTLXYBITSEL_V(1);
2155 		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2156 		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2157 		tcamx = DMACH_G(val) << 32;
2158 		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2159 		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2160 		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2161 			/* Inner header VNI mask */
2162 			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2163 			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
2164 		}
2165 	} else {
2166 		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
2167 		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
2168 	}
2169 
2170 	/* If no entry, return */
2171 	if (tcamx & tcamy)
2172 		return rc;
2173 
2174 	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
2175 	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
2176 
2177 	if (is_t5(padap->params.chip))
2178 		tcam->repli = (tcam->cls_lo & REPLICATE_F);
2179 	else if (is_t6(padap->params.chip))
2180 		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
2181 
2182 	if (tcam->repli) {
2183 		struct fw_ldst_cmd ldst_cmd;
2184 		struct fw_ldst_mps_rplc mps_rplc;
2185 
2186 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
2187 		ldst_cmd.op_to_addrspace =
2188 			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
2189 			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
2190 			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
2191 		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
2192 		ldst_cmd.u.mps.rplc.fid_idx =
2193 			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
2194 			      FW_LDST_CMD_IDX_V(idx));
2195 
2196 		/* If firmware is not attached/alive, use backdoor register
2197 		 * access to collect dump.
2198 		 */
2199 		if (is_fw_attached(pdbg_init))
2200 			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
2201 					sizeof(ldst_cmd), &ldst_cmd);
2202 
2203 		if (rc || !is_fw_attached(pdbg_init)) {
2204 			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
2205 			/* Ignore error since we collected directly from
2206 			 * reading registers.
2207 			 */
2208 			rc = 0;
2209 		} else {
2210 			mps_rplc = ldst_cmd.u.mps.rplc;
2211 		}
2212 
2213 		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
2214 		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
2215 		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
2216 		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
2217 		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
2218 			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
2219 			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
2220 			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
2221 			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
2222 		}
2223 	}
2224 	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
2225 	tcam->idx = idx;
2226 	tcam->rplc_size = padap->params.arch.mps_rplc_size;
2227 	return rc;
2228 }
2229 
2230 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
2231 			   struct cudbg_buffer *dbg_buff,
2232 			   struct cudbg_error *cudbg_err)
2233 {
2234 	struct adapter *padap = pdbg_init->adap;
2235 	struct cudbg_buffer temp_buff = { 0 };
2236 	u32 size = 0, i, n, total_size = 0;
2237 	struct cudbg_mps_tcam *tcam;
2238 	int rc;
2239 
2240 	n = padap->params.arch.mps_tcam_size;
2241 	size = sizeof(struct cudbg_mps_tcam) * n;
2242 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2243 	if (rc)
2244 		return rc;
2245 
2246 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
2247 	for (i = 0; i < n; i++) {
2248 		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
2249 		if (rc) {
2250 			cudbg_err->sys_err = rc;
2251 			cudbg_put_buff(pdbg_init, &temp_buff);
2252 			return rc;
2253 		}
2254 		total_size += sizeof(struct cudbg_mps_tcam);
2255 		tcam++;
2256 	}
2257 
2258 	if (!total_size) {
2259 		rc = CUDBG_SYSTEM_ERROR;
2260 		cudbg_err->sys_err = rc;
2261 		cudbg_put_buff(pdbg_init, &temp_buff);
2262 		return rc;
2263 	}
2264 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2265 }
2266 
2267 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
2268 			   struct cudbg_buffer *dbg_buff,
2269 			   struct cudbg_error *cudbg_err)
2270 {
2271 	struct adapter *padap = pdbg_init->adap;
2272 	struct cudbg_buffer temp_buff = { 0 };
2273 	char vpd_str[CUDBG_VPD_VER_LEN + 1];
2274 	u32 scfg_vers, vpd_vers, fw_vers;
2275 	struct cudbg_vpd_data *vpd_data;
2276 	struct vpd_params vpd = { 0 };
2277 	int rc, ret;
2278 
2279 	rc = t4_get_raw_vpd_params(padap, &vpd);
2280 	if (rc)
2281 		return rc;
2282 
2283 	rc = t4_get_fw_version(padap, &fw_vers);
2284 	if (rc)
2285 		return rc;
2286 
2287 	/* Serial Configuration Version is located beyond the PF's vpd size.
2288 	 * Temporarily give access to entire EEPROM to get it.
2289 	 */
2290 	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
2291 	if (rc < 0)
2292 		return rc;
2293 
2294 	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
2295 				 &scfg_vers);
2296 
2297 	/* Restore back to original PF's vpd size */
2298 	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
2299 	if (rc < 0)
2300 		return rc;
2301 
2302 	if (ret)
2303 		return ret;
2304 
2305 	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
2306 				vpd_str);
2307 	if (rc)
2308 		return rc;
2309 
2310 	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
2311 	rc = kstrtouint(vpd_str, 0, &vpd_vers);
2312 	if (rc)
2313 		return rc;
2314 
2315 	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
2316 			    &temp_buff);
2317 	if (rc)
2318 		return rc;
2319 
2320 	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
2321 	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
2322 	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
2323 	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
2324 	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
2325 	vpd_data->scfg_vers = scfg_vers;
2326 	vpd_data->vpd_vers = vpd_vers;
2327 	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
2328 	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
2329 	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
2330 	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
2331 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2332 }
2333 
2334 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
2335 			  struct cudbg_tid_data *tid_data)
2336 {
2337 	struct adapter *padap = pdbg_init->adap;
2338 	int i, cmd_retry = 8;
2339 	u32 val;
2340 
2341 	/* Fill REQ_DATA regs with 0's */
2342 	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
2343 		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
2344 
2345 	/* Write DBIG command */
2346 	val = DBGICMD_V(4) | DBGITID_V(tid);
2347 	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
2348 	tid_data->dbig_cmd = val;
2349 
2350 	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
2351 	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
2352 	tid_data->dbig_conf = val;
2353 
2354 	/* Poll the DBGICMDBUSY bit */
2355 	val = 1;
2356 	while (val) {
2357 		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
2358 		val = val & DBGICMDBUSY_F;
2359 		cmd_retry--;
2360 		if (!cmd_retry)
2361 			return CUDBG_SYSTEM_ERROR;
2362 	}
2363 
2364 	/* Check RESP status */
2365 	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
2366 	tid_data->dbig_rsp_stat = val;
2367 	if (!(val & 1))
2368 		return CUDBG_SYSTEM_ERROR;
2369 
2370 	/* Read RESP data */
2371 	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
2372 		tid_data->data[i] = t4_read_reg(padap,
2373 						LE_DB_DBGI_RSP_DATA_A +
2374 						(i << 2));
2375 	tid_data->tid = tid;
2376 	return 0;
2377 }
2378 
2379 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
2380 {
2381 	int type = LE_ET_UNKNOWN;
2382 
2383 	if (tid < tcam_region.server_start)
2384 		type = LE_ET_TCAM_CON;
2385 	else if (tid < tcam_region.filter_start)
2386 		type = LE_ET_TCAM_SERVER;
2387 	else if (tid < tcam_region.clip_start)
2388 		type = LE_ET_TCAM_FILTER;
2389 	else if (tid < tcam_region.routing_start)
2390 		type = LE_ET_TCAM_CLIP;
2391 	else if (tid < tcam_region.tid_hash_base)
2392 		type = LE_ET_TCAM_ROUTING;
2393 	else if (tid < tcam_region.max_tid)
2394 		type = LE_ET_HASH_CON;
2395 	else
2396 		type = LE_ET_INVALID_TID;
2397 
2398 	return type;
2399 }
2400 
2401 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
2402 			       struct cudbg_tcam tcam_region)
2403 {
2404 	int ipv6 = 0;
2405 	int le_type;
2406 
2407 	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
2408 	if (tid_data->tid & 1)
2409 		return 0;
2410 
2411 	if (le_type == LE_ET_HASH_CON) {
2412 		ipv6 = tid_data->data[16] & 0x8000;
2413 	} else if (le_type == LE_ET_TCAM_CON) {
2414 		ipv6 = tid_data->data[16] & 0x8000;
2415 		if (ipv6)
2416 			ipv6 = tid_data->data[9] == 0x00C00000;
2417 	} else {
2418 		ipv6 = 0;
2419 	}
2420 	return ipv6;
2421 }
2422 
2423 void cudbg_fill_le_tcam_info(struct adapter *padap,
2424 			     struct cudbg_tcam *tcam_region)
2425 {
2426 	u32 value;
2427 
2428 	/* Get the LE regions */
2429 	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
2430 	tcam_region->tid_hash_base = value;
2431 
2432 	/* Get routing table index */
2433 	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
2434 	tcam_region->routing_start = value;
2435 
2436 	/* Get clip table index. For T6 there is separate CLIP TCAM */
2437 	if (is_t6(padap->params.chip))
2438 		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
2439 	else
2440 		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
2441 	tcam_region->clip_start = value;
2442 
2443 	/* Get filter table index */
2444 	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
2445 	tcam_region->filter_start = value;
2446 
2447 	/* Get server table index */
2448 	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
2449 	tcam_region->server_start = value;
2450 
2451 	/* Check whether hash is enabled and calculate the max tids */
2452 	value = t4_read_reg(padap, LE_DB_CONFIG_A);
2453 	if ((value >> HASHEN_S) & 1) {
2454 		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
2455 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
2456 			tcam_region->max_tid = (value & 0xFFFFF) +
2457 					       tcam_region->tid_hash_base;
2458 		} else {
2459 			value = HASHTIDSIZE_G(value);
2460 			value = 1 << value;
2461 			tcam_region->max_tid = value +
2462 					       tcam_region->tid_hash_base;
2463 		}
2464 	} else { /* hash not enabled */
2465 		if (is_t6(padap->params.chip))
2466 			tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
2467 					       CUDBG_MAX_TID_COMP_EN :
2468 					       CUDBG_MAX_TID_COMP_DIS;
2469 		else
2470 			tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
2471 	}
2472 
2473 	if (is_t6(padap->params.chip))
2474 		tcam_region->max_tid += CUDBG_T6_CLIP;
2475 }
2476 
2477 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
2478 			  struct cudbg_buffer *dbg_buff,
2479 			  struct cudbg_error *cudbg_err)
2480 {
2481 	struct adapter *padap = pdbg_init->adap;
2482 	struct cudbg_buffer temp_buff = { 0 };
2483 	struct cudbg_tcam tcam_region = { 0 };
2484 	struct cudbg_tid_data *tid_data;
2485 	u32 bytes = 0;
2486 	int rc, size;
2487 	u32 i;
2488 
2489 	cudbg_fill_le_tcam_info(padap, &tcam_region);
2490 
2491 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
2492 	size += sizeof(struct cudbg_tcam);
2493 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2494 	if (rc)
2495 		return rc;
2496 
2497 	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
2498 	bytes = sizeof(struct cudbg_tcam);
2499 	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
2500 	/* read all tid */
2501 	for (i = 0; i < tcam_region.max_tid; ) {
2502 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
2503 		if (rc) {
2504 			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
2505 			/* Update tcam header and exit */
2506 			tcam_region.max_tid = i;
2507 			memcpy(temp_buff.data, &tcam_region,
2508 			       sizeof(struct cudbg_tcam));
2509 			goto out;
2510 		}
2511 
2512 		if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
2513 			/* T6 CLIP TCAM: ipv6 takes 4 entries */
2514 			if (is_t6(padap->params.chip) &&
2515 			    i >= tcam_region.clip_start &&
2516 			    i < tcam_region.clip_start + CUDBG_T6_CLIP)
2517 				i += 4;
2518 			else /* Main TCAM: ipv6 takes two tids */
2519 				i += 2;
2520 		} else {
2521 			i++;
2522 		}
2523 
2524 		tid_data++;
2525 		bytes += sizeof(struct cudbg_tid_data);
2526 	}
2527 
2528 out:
2529 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2530 }
2531 
2532 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
2533 			struct cudbg_buffer *dbg_buff,
2534 			struct cudbg_error *cudbg_err)
2535 {
2536 	struct adapter *padap = pdbg_init->adap;
2537 	struct cudbg_buffer temp_buff = { 0 };
2538 	u32 size;
2539 	int rc;
2540 
2541 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2542 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2543 	if (rc)
2544 		return rc;
2545 
2546 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
2547 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2548 }
2549 
2550 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
2551 			      struct cudbg_buffer *dbg_buff,
2552 			      struct cudbg_error *cudbg_err)
2553 {
2554 	struct adapter *padap = pdbg_init->adap;
2555 	struct cudbg_buffer temp_buff = { 0 };
2556 	struct ireg_buf *ma_indr;
2557 	int i, rc, n;
2558 	u32 size, j;
2559 
2560 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2561 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
2562 
2563 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2564 	size = sizeof(struct ireg_buf) * n * 2;
2565 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2566 	if (rc)
2567 		return rc;
2568 
2569 	ma_indr = (struct ireg_buf *)temp_buff.data;
2570 	for (i = 0; i < n; i++) {
2571 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
2572 		u32 *buff = ma_indr->outbuf;
2573 
2574 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
2575 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
2576 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
2577 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
2578 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
2579 				 buff, ma_fli->ireg_offset_range,
2580 				 ma_fli->ireg_local_offset);
2581 		ma_indr++;
2582 	}
2583 
2584 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
2585 	for (i = 0; i < n; i++) {
2586 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
2587 		u32 *buff = ma_indr->outbuf;
2588 
2589 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
2590 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
2591 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
2592 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
2593 			t4_read_indirect(padap, ma_fli->ireg_addr,
2594 					 ma_fli->ireg_data, buff, 1,
2595 					 ma_fli->ireg_local_offset);
2596 			buff++;
2597 			ma_fli->ireg_local_offset += 0x20;
2598 		}
2599 		ma_indr++;
2600 	}
2601 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2602 }
2603 
2604 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
2605 			   struct cudbg_buffer *dbg_buff,
2606 			   struct cudbg_error *cudbg_err)
2607 {
2608 	struct adapter *padap = pdbg_init->adap;
2609 	struct cudbg_buffer temp_buff = { 0 };
2610 	struct cudbg_ulptx_la *ulptx_la_buff;
2611 	struct cudbg_ver_hdr *ver_hdr;
2612 	u32 i, j;
2613 	int rc;
2614 
2615 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
2616 			    sizeof(struct cudbg_ver_hdr) +
2617 			    sizeof(struct cudbg_ulptx_la),
2618 			    &temp_buff);
2619 	if (rc)
2620 		return rc;
2621 
2622 	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
2623 	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2624 	ver_hdr->revision = CUDBG_ULPTX_LA_REV;
2625 	ver_hdr->size = sizeof(struct cudbg_ulptx_la);
2626 
2627 	ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
2628 						  sizeof(*ver_hdr));
2629 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
2630 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
2631 						      ULP_TX_LA_RDPTR_0_A +
2632 						      0x10 * i);
2633 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
2634 						      ULP_TX_LA_WRPTR_0_A +
2635 						      0x10 * i);
2636 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
2637 						       ULP_TX_LA_RDDATA_0_A +
2638 						       0x10 * i);
2639 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
2640 			ulptx_la_buff->rd_data[i][j] =
2641 				t4_read_reg(padap,
2642 					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
2643 	}
2644 
2645 	for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
2646 		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
2647 		ulptx_la_buff->rdptr_asic[i] =
2648 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
2649 		ulptx_la_buff->rddata_asic[i][0] =
2650 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
2651 		ulptx_la_buff->rddata_asic[i][1] =
2652 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
2653 		ulptx_la_buff->rddata_asic[i][2] =
2654 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
2655 		ulptx_la_buff->rddata_asic[i][3] =
2656 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
2657 		ulptx_la_buff->rddata_asic[i][4] =
2658 				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
2659 		ulptx_la_buff->rddata_asic[i][5] =
2660 				t4_read_reg(padap, PM_RX_BASE_ADDR);
2661 	}
2662 
2663 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2664 }
2665 
2666 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2667 				  struct cudbg_buffer *dbg_buff,
2668 				  struct cudbg_error *cudbg_err)
2669 {
2670 	struct adapter *padap = pdbg_init->adap;
2671 	struct cudbg_buffer temp_buff = { 0 };
2672 	u32 local_offset, local_range;
2673 	struct ireg_buf *up_cim;
2674 	u32 size, j, iter;
2675 	u32 instance = 0;
2676 	int i, rc, n;
2677 
2678 	if (is_t5(padap->params.chip))
2679 		n = sizeof(t5_up_cim_reg_array) /
2680 		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
2681 	else if (is_t6(padap->params.chip))
2682 		n = sizeof(t6_up_cim_reg_array) /
2683 		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
2684 	else
2685 		return CUDBG_STATUS_NOT_IMPLEMENTED;
2686 
2687 	size = sizeof(struct ireg_buf) * n;
2688 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2689 	if (rc)
2690 		return rc;
2691 
2692 	up_cim = (struct ireg_buf *)temp_buff.data;
2693 	for (i = 0; i < n; i++) {
2694 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2695 		u32 *buff = up_cim->outbuf;
2696 
2697 		if (is_t5(padap->params.chip)) {
2698 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2699 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2700 			up_cim_reg->ireg_local_offset =
2701 						t5_up_cim_reg_array[i][2];
2702 			up_cim_reg->ireg_offset_range =
2703 						t5_up_cim_reg_array[i][3];
2704 			instance = t5_up_cim_reg_array[i][4];
2705 		} else if (is_t6(padap->params.chip)) {
2706 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2707 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
2708 			up_cim_reg->ireg_local_offset =
2709 						t6_up_cim_reg_array[i][2];
2710 			up_cim_reg->ireg_offset_range =
2711 						t6_up_cim_reg_array[i][3];
2712 			instance = t6_up_cim_reg_array[i][4];
2713 		}
2714 
2715 		switch (instance) {
2716 		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
2717 			iter = up_cim_reg->ireg_offset_range;
2718 			local_offset = 0x120;
2719 			local_range = 1;
2720 			break;
2721 		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
2722 			iter = up_cim_reg->ireg_offset_range;
2723 			local_offset = 0x10;
2724 			local_range = 1;
2725 			break;
2726 		default:
2727 			iter = 1;
2728 			local_offset = 0;
2729 			local_range = up_cim_reg->ireg_offset_range;
2730 			break;
2731 		}
2732 
2733 		for (j = 0; j < iter; j++, buff++) {
2734 			rc = t4_cim_read(padap,
2735 					 up_cim_reg->ireg_local_offset +
2736 					 (j * local_offset), local_range, buff);
2737 			if (rc) {
2738 				cudbg_put_buff(pdbg_init, &temp_buff);
2739 				return rc;
2740 			}
2741 		}
2742 		up_cim++;
2743 	}
2744 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2745 }
2746 
2747 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
2748 			     struct cudbg_buffer *dbg_buff,
2749 			     struct cudbg_error *cudbg_err)
2750 {
2751 	struct adapter *padap = pdbg_init->adap;
2752 	struct cudbg_buffer temp_buff = { 0 };
2753 	struct cudbg_pbt_tables *pbt;
2754 	int i, rc;
2755 	u32 addr;
2756 
2757 	rc = cudbg_get_buff(pdbg_init, dbg_buff,
2758 			    sizeof(struct cudbg_pbt_tables),
2759 			    &temp_buff);
2760 	if (rc)
2761 		return rc;
2762 
2763 	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
2764 	/* PBT dynamic entries */
2765 	addr = CUDBG_CHAC_PBT_ADDR;
2766 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
2767 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2768 				 &pbt->pbt_dynamic[i]);
2769 		if (rc) {
2770 			cudbg_err->sys_err = rc;
2771 			cudbg_put_buff(pdbg_init, &temp_buff);
2772 			return rc;
2773 		}
2774 	}
2775 
2776 	/* PBT static entries */
2777 	/* static entries start when bit 6 is set */
2778 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
2779 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
2780 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2781 				 &pbt->pbt_static[i]);
2782 		if (rc) {
2783 			cudbg_err->sys_err = rc;
2784 			cudbg_put_buff(pdbg_init, &temp_buff);
2785 			return rc;
2786 		}
2787 	}
2788 
2789 	/* LRF entries */
2790 	addr = CUDBG_CHAC_PBT_LRF;
2791 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
2792 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2793 				 &pbt->lrf_table[i]);
2794 		if (rc) {
2795 			cudbg_err->sys_err = rc;
2796 			cudbg_put_buff(pdbg_init, &temp_buff);
2797 			return rc;
2798 		}
2799 	}
2800 
2801 	/* PBT data entries */
2802 	addr = CUDBG_CHAC_PBT_DATA;
2803 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
2804 		rc = t4_cim_read(padap, addr + (i * 4), 1,
2805 				 &pbt->pbt_data[i]);
2806 		if (rc) {
2807 			cudbg_err->sys_err = rc;
2808 			cudbg_put_buff(pdbg_init, &temp_buff);
2809 			return rc;
2810 		}
2811 	}
2812 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2813 }
2814 
2815 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
2816 			   struct cudbg_buffer *dbg_buff,
2817 			   struct cudbg_error *cudbg_err)
2818 {
2819 	struct adapter *padap = pdbg_init->adap;
2820 	struct cudbg_mbox_log *mboxlog = NULL;
2821 	struct cudbg_buffer temp_buff = { 0 };
2822 	struct mbox_cmd_log *log = NULL;
2823 	struct mbox_cmd *entry;
2824 	unsigned int entry_idx;
2825 	u16 mbox_cmds;
2826 	int i, k, rc;
2827 	u64 flit;
2828 	u32 size;
2829 
2830 	log = padap->mbox_log;
2831 	mbox_cmds = padap->mbox_log->size;
2832 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
2833 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2834 	if (rc)
2835 		return rc;
2836 
2837 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
2838 	for (k = 0; k < mbox_cmds; k++) {
2839 		entry_idx = log->cursor + k;
2840 		if (entry_idx >= log->size)
2841 			entry_idx -= log->size;
2842 
2843 		entry = mbox_cmd_log_entry(log, entry_idx);
2844 		/* skip over unused entries */
2845 		if (entry->timestamp == 0)
2846 			continue;
2847 
2848 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
2849 		for (i = 0; i < MBOX_LEN / 8; i++) {
2850 			flit = entry->cmd[i];
2851 			mboxlog->hi[i] = (u32)(flit >> 32);
2852 			mboxlog->lo[i] = (u32)flit;
2853 		}
2854 		mboxlog++;
2855 	}
2856 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2857 }
2858 
2859 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
2860 			       struct cudbg_buffer *dbg_buff,
2861 			       struct cudbg_error *cudbg_err)
2862 {
2863 	struct adapter *padap = pdbg_init->adap;
2864 	struct cudbg_buffer temp_buff = { 0 };
2865 	struct ireg_buf *hma_indr;
2866 	int i, rc, n;
2867 	u32 size;
2868 
2869 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2870 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
2871 
2872 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2873 	size = sizeof(struct ireg_buf) * n;
2874 	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2875 	if (rc)
2876 		return rc;
2877 
2878 	hma_indr = (struct ireg_buf *)temp_buff.data;
2879 	for (i = 0; i < n; i++) {
2880 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
2881 		u32 *buff = hma_indr->outbuf;
2882 
2883 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
2884 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
2885 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
2886 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
2887 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
2888 				 buff, hma_fli->ireg_offset_range,
2889 				 hma_fli->ireg_local_offset);
2890 		hma_indr++;
2891 	}
2892 	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2893 }
2894 
2895 void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
2896 				   u32 *num, u32 *size)
2897 {
2898 	u32 tot_entries = 0, tot_size = 0;
2899 
2900 	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
2901 	tot_entries += MAX_ETH_QSETS * 3;
2902 	tot_entries += MAX_CTRL_QUEUES;
2903 
2904 	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
2905 	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
2906 	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
2907 	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
2908 		    MAX_CTRL_TXQ_DESC_SIZE;
2909 
2910 	/* FW_EVTQ and INTRQ */
2911 	tot_entries += INGQ_EXTRAS;
2912 	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
2913 
2914 	/* PTP_TXQ */
2915 	tot_entries += 1;
2916 	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
2917 
2918 	/* ULD TXQ, RXQ, and FLQ */
2919 	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
2920 	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;
2921 
2922 	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
2923 		    MAX_TXQ_DESC_SIZE;
2924 	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
2925 		    MAX_RXQ_DESC_SIZE;
2926 	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
2927 		    MAX_FL_DESC_SIZE;
2928 
2929 	/* ULD CIQ */
2930 	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
2931 	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
2932 		    MAX_RXQ_DESC_SIZE;
2933 
2934 	tot_size += sizeof(struct cudbg_ver_hdr) +
2935 		    sizeof(struct cudbg_qdesc_info) +
2936 		    sizeof(struct cudbg_qdesc_entry) * tot_entries;
2937 
2938 	if (num)
2939 		*num = tot_entries;
2940 
2941 	if (size)
2942 		*size = tot_size;
2943 }
2944 
2945 int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
2946 			struct cudbg_buffer *dbg_buff,
2947 			struct cudbg_error *cudbg_err)
2948 {
2949 	u32 num_queues = 0, tot_entries = 0, size = 0;
2950 	struct adapter *padap = pdbg_init->adap;
2951 	struct cudbg_buffer temp_buff = { 0 };
2952 	struct cudbg_qdesc_entry *qdesc_entry;
2953 	struct cudbg_qdesc_info *qdesc_info;
2954 	struct cudbg_ver_hdr *ver_hdr;
2955 	struct sge *s = &padap->sge;
2956 	u32 i, j, cur_off, tot_len;
2957 	u8 *data;
2958 	int rc;
2959 
2960 	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
2961 	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
2962 	tot_len = size;
2963 	data = kvzalloc(size, GFP_KERNEL);
2964 	if (!data)
2965 		return -ENOMEM;
2966 
2967 	ver_hdr = (struct cudbg_ver_hdr *)data;
2968 	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2969 	ver_hdr->revision = CUDBG_QDESC_REV;
2970 	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
2971 	size -= sizeof(*ver_hdr);
2972 
2973 	qdesc_info = (struct cudbg_qdesc_info *)(data +
2974 						 sizeof(*ver_hdr));
2975 	size -= sizeof(*qdesc_info);
2976 	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
2977 
2978 #define QDESC_GET(q, desc, type, label) do { \
2979 	if (size <= 0) { \
2980 		goto label; \
2981 	} \
2982 	if (desc) { \
2983 		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
2984 		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
2985 		num_queues++; \
2986 		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
2987 	} \
2988 } while (0)
2989 
2990 #define QDESC_GET_TXQ(q, type, label) do { \
2991 	struct sge_txq *txq = (struct sge_txq *)q; \
2992 	QDESC_GET(txq, txq->desc, type, label); \
2993 } while (0)
2994 
2995 #define QDESC_GET_RXQ(q, type, label) do { \
2996 	struct sge_rspq *rxq = (struct sge_rspq *)q; \
2997 	QDESC_GET(rxq, rxq->desc, type, label); \
2998 } while (0)
2999 
3000 #define QDESC_GET_FLQ(q, type, label) do { \
3001 	struct sge_fl *flq = (struct sge_fl *)q; \
3002 	QDESC_GET(flq, flq->desc, type, label); \
3003 } while (0)
3004 
3005 	/* NIC TXQ */
3006 	for (i = 0; i < s->ethqsets; i++)
3007 		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
3008 
3009 	/* NIC RXQ */
3010 	for (i = 0; i < s->ethqsets; i++)
3011 		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
3012 
3013 	/* NIC FLQ */
3014 	for (i = 0; i < s->ethqsets; i++)
3015 		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
3016 
3017 	/* NIC CTRLQ */
3018 	for (i = 0; i < padap->params.nports; i++)
3019 		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
3020 
3021 	/* FW_EVTQ */
3022 	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
3023 
3024 	/* INTRQ */
3025 	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
3026 
3027 	/* PTP_TXQ */
3028 	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
3029 
3030 	/* ULD Queues */
3031 	mutex_lock(&uld_mutex);
3032 
3033 	if (s->uld_txq_info) {
3034 		struct sge_uld_txq_info *utxq;
3035 
3036 		/* ULD TXQ */
3037 		for (j = 0; j < CXGB4_TX_MAX; j++) {
3038 			if (!s->uld_txq_info[j])
3039 				continue;
3040 
3041 			utxq = s->uld_txq_info[j];
3042 			for (i = 0; i < utxq->ntxq; i++)
3043 				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
3044 					      cudbg_uld_txq_to_qtype(j),
3045 					      out_unlock);
3046 		}
3047 	}
3048 
3049 	if (s->uld_rxq_info) {
3050 		struct sge_uld_rxq_info *urxq;
3051 		u32 base;
3052 
3053 		/* ULD RXQ */
3054 		for (j = 0; j < CXGB4_ULD_MAX; j++) {
3055 			if (!s->uld_rxq_info[j])
3056 				continue;
3057 
3058 			urxq = s->uld_rxq_info[j];
3059 			for (i = 0; i < urxq->nrxq; i++)
3060 				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
3061 					      cudbg_uld_rxq_to_qtype(j),
3062 					      out_unlock);
3063 		}
3064 
3065 		/* ULD FLQ */
3066 		for (j = 0; j < CXGB4_ULD_MAX; j++) {
3067 			if (!s->uld_rxq_info[j])
3068 				continue;
3069 
3070 			urxq = s->uld_rxq_info[j];
3071 			for (i = 0; i < urxq->nrxq; i++)
3072 				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
3073 					      cudbg_uld_flq_to_qtype(j),
3074 					      out_unlock);
3075 		}
3076 
3077 		/* ULD CIQ */
3078 		for (j = 0; j < CXGB4_ULD_MAX; j++) {
3079 			if (!s->uld_rxq_info[j])
3080 				continue;
3081 
3082 			urxq = s->uld_rxq_info[j];
3083 			base = urxq->nrxq;
3084 			for (i = 0; i < urxq->nciq; i++)
3085 				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
3086 					      cudbg_uld_ciq_to_qtype(j),
3087 					      out_unlock);
3088 		}
3089 	}
3090 
3091 out_unlock:
3092 	mutex_unlock(&uld_mutex);
3093 
3094 out:
3095 	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
3096 	qdesc_info->num_queues = num_queues;
3097 	cur_off = 0;
3098 	while (tot_len) {
3099 		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
3100 
3101 		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
3102 				    &temp_buff);
3103 		if (rc) {
3104 			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
3105 			goto out_free;
3106 		}
3107 
3108 		memcpy(temp_buff.data, data + cur_off, chunk_size);
3109 		tot_len -= chunk_size;
3110 		cur_off += chunk_size;
3111 		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
3112 						  dbg_buff);
3113 		if (rc) {
3114 			cudbg_put_buff(pdbg_init, &temp_buff);
3115 			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
3116 			goto out_free;
3117 		}
3118 	}
3119 
3120 out_free:
3121 	if (data)
3122 		kvfree(data);
3123 
3124 #undef QDESC_GET_FLQ
3125 #undef QDESC_GET_RXQ
3126 #undef QDESC_GET_TXQ
3127 #undef QDESC_GET
3128 
3129 	return rc;
3130 }
3131