1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Commit the data staged in @pin_buff to the main debug buffer and then
 * release the scratch buffer back to @dbg_buff.  The two calls are order
 * dependent: the update must happen before the buffer is returned.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
72 			      void *dest)
73 {
74 	int vaddr, rc;
75 
76 	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
77 	if (vaddr < 0)
78 		return vaddr;
79 
80 	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
81 	if (rc < 0)
82 		return rc;
83 
84 	return 0;
85 }
86 
87 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
88 			   struct cudbg_buffer *dbg_buff,
89 			   struct cudbg_error *cudbg_err)
90 {
91 	struct adapter *padap = pdbg_init->adap;
92 	struct cudbg_buffer temp_buff = { 0 };
93 	u32 buf_size = 0;
94 	int rc = 0;
95 
96 	if (is_t4(padap->params.chip))
97 		buf_size = T4_REGMAP_SIZE;
98 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
99 		buf_size = T5_REGMAP_SIZE;
100 
101 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
102 	if (rc)
103 		return rc;
104 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
105 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
106 	return rc;
107 }
108 
/* Collect the firmware device log into the debug buffer.
 *
 * Returns 0 on success or a negative error code; hardware/firmware
 * failures are also recorded in @cudbg_err->sys_err.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	/* Refresh the devlog location/size parameters before reading */
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		/* win0_lock serializes access through memory window 0 */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
147 
/* Collect the CIM logic-analyzer trace.  The dump starts with the LA
 * config register value, followed by the LA data itself.
 *
 * Returns 0 on success or a negative error code; CIM access failures are
 * also recorded in @cudbg_err->sys_err.
 */
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	/* Size the buffer for the chip's LA record layout: T6 produces 11
	 * words per group of 10 entries, earlier chips 8 words per group
	 * of 8 entries (presumably matching t4_cim_read_la()'s output
	 * format — confirm against that helper).
	 */
	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	/* Leading space for the LA config word */
	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
189 
190 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
191 			    struct cudbg_buffer *dbg_buff,
192 			    struct cudbg_error *cudbg_err)
193 {
194 	struct adapter *padap = pdbg_init->adap;
195 	struct cudbg_buffer temp_buff = { 0 };
196 	int size, rc;
197 
198 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
199 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
200 	if (rc)
201 		return rc;
202 
203 	t4_cim_read_ma_la(padap,
204 			  (u32 *)temp_buff.data,
205 			  (u32 *)((char *)temp_buff.data +
206 				  5 * CIM_MALA_SIZE));
207 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
208 	return rc;
209 }
210 
211 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
212 			   struct cudbg_buffer *dbg_buff,
213 			   struct cudbg_error *cudbg_err)
214 {
215 	struct adapter *padap = pdbg_init->adap;
216 	struct cudbg_buffer temp_buff = { 0 };
217 	struct cudbg_cim_qcfg *cim_qcfg_data;
218 	int rc;
219 
220 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
221 			    &temp_buff);
222 	if (rc)
223 		return rc;
224 
225 	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
226 	cim_qcfg_data->chip = padap->params.chip;
227 	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
228 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
229 	if (rc) {
230 		cudbg_err->sys_err = rc;
231 		cudbg_put_buff(&temp_buff, dbg_buff);
232 		return rc;
233 	}
234 
235 	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
236 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
237 			 cim_qcfg_data->obq_wr);
238 	if (rc) {
239 		cudbg_err->sys_err = rc;
240 		cudbg_put_buff(&temp_buff, dbg_buff);
241 		return rc;
242 	}
243 
244 	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
245 			 cim_qcfg_data->thres);
246 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
247 	return rc;
248 }
249 
250 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
251 			      struct cudbg_buffer *dbg_buff,
252 			      struct cudbg_error *cudbg_err, int qid)
253 {
254 	struct adapter *padap = pdbg_init->adap;
255 	struct cudbg_buffer temp_buff = { 0 };
256 	int no_of_read_words, rc = 0;
257 	u32 qsize;
258 
259 	/* collect CIM IBQ */
260 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
261 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
262 	if (rc)
263 		return rc;
264 
265 	/* t4_read_cim_ibq will return no. of read words or error */
266 	no_of_read_words = t4_read_cim_ibq(padap, qid,
267 					   (u32 *)temp_buff.data, qsize);
268 	/* no_of_read_words is less than or equal to 0 means error */
269 	if (no_of_read_words <= 0) {
270 		if (!no_of_read_words)
271 			rc = CUDBG_SYSTEM_ERROR;
272 		else
273 			rc = no_of_read_words;
274 		cudbg_err->sys_err = rc;
275 		cudbg_put_buff(&temp_buff, dbg_buff);
276 		return rc;
277 	}
278 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
279 	return rc;
280 }
281 
/* Per-queue wrappers around cudbg_read_cim_ibq().  The final argument is
 * the hardware IBQ index: 0-1 TP, 2 ULP, 3-4 SGE, 5 NC-SI.
 */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
323 
324 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
325 {
326 	u32 value;
327 
328 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
329 		     QUENUMSELECT_V(qid));
330 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
331 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
332 	return value * sizeof(u32);
333 }
334 
335 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
336 			      struct cudbg_buffer *dbg_buff,
337 			      struct cudbg_error *cudbg_err, int qid)
338 {
339 	struct adapter *padap = pdbg_init->adap;
340 	struct cudbg_buffer temp_buff = { 0 };
341 	int no_of_read_words, rc = 0;
342 	u32 qsize;
343 
344 	/* collect CIM OBQ */
345 	qsize =  cudbg_cim_obq_size(padap, qid);
346 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
347 	if (rc)
348 		return rc;
349 
350 	/* t4_read_cim_obq will return no. of read words or error */
351 	no_of_read_words = t4_read_cim_obq(padap, qid,
352 					   (u32 *)temp_buff.data, qsize);
353 	/* no_of_read_words is less than or equal to 0 means error */
354 	if (no_of_read_words <= 0) {
355 		if (!no_of_read_words)
356 			rc = CUDBG_SYSTEM_ERROR;
357 		else
358 			rc = no_of_read_words;
359 		cudbg_err->sys_err = rc;
360 		cudbg_put_buff(&temp_buff, dbg_buff);
361 		return rc;
362 	}
363 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
364 	return rc;
365 }
366 
/* Per-queue wrappers around cudbg_read_cim_obq().  The final argument is
 * the hardware OBQ index: 0-3 ULP, 4 SGE, 5 NC-SI, 6-7 SGE RX queues.
 */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
422 
/* Copy @tot_len bytes of adapter memory of type @mem_type into the debug
 * buffer, in chunks of at most CUDBG_CHUNK_SIZE.  Each chunk is staged in
 * a scratch buffer and committed on success, so a mid-stream failure
 * leaves previously committed chunks intact.
 *
 * Returns 0 on success or a negative error code; memory-window failures
 * are also recorded in @cudbg_err->sys_err.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		/* win0_lock serializes access through memory window 0 */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
457 
458 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
459 				   struct card_mem *mem_info)
460 {
461 	struct adapter *padap = pdbg_init->adap;
462 	u32 value;
463 
464 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
465 	value = EDRAM0_SIZE_G(value);
466 	mem_info->size_edc0 = (u16)value;
467 
468 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
469 	value = EDRAM1_SIZE_G(value);
470 	mem_info->size_edc1 = (u16)value;
471 
472 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
473 	if (value & EDRAM0_ENABLE_F)
474 		mem_info->mem_flag |= (1 << EDC0_FLAG);
475 	if (value & EDRAM1_ENABLE_F)
476 		mem_info->mem_flag |= (1 << EDC1_FLAG);
477 }
478 
479 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
480 			     struct cudbg_error *cudbg_err)
481 {
482 	struct adapter *padap = pdbg_init->adap;
483 	int rc;
484 
485 	if (is_fw_attached(pdbg_init)) {
486 		/* Flush uP dcache before reading edcX/mcX  */
487 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
488 		if (rc)
489 			cudbg_err->sys_warn = rc;
490 	}
491 }
492 
493 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
494 				    struct cudbg_buffer *dbg_buff,
495 				    struct cudbg_error *cudbg_err,
496 				    u8 mem_type)
497 {
498 	struct card_mem mem_info = {0};
499 	unsigned long flag, size;
500 	int rc;
501 
502 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
503 	cudbg_collect_mem_info(pdbg_init, &mem_info);
504 	switch (mem_type) {
505 	case MEM_EDC0:
506 		flag = (1 << EDC0_FLAG);
507 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
508 		break;
509 	case MEM_EDC1:
510 		flag = (1 << EDC1_FLAG);
511 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
512 		break;
513 	default:
514 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
515 		goto err;
516 	}
517 
518 	if (mem_info.mem_flag & flag) {
519 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
520 				       size, cudbg_err);
521 		if (rc)
522 			goto err;
523 	} else {
524 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
525 		goto err;
526 	}
527 err:
528 	return rc;
529 }
530 
/* Entity wrappers dumping the EDC0 / EDC1 memory regions */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
546 
/* Collect the RSS lookup table (RSS_NENTRIES 16-bit entries).
 *
 * Returns 0 on success or a negative error code; read failures are also
 * recorded in @cudbg_err->sys_err.
 */
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
568 
/* Collect the per-VF RSS configuration (one cudbg_rss_vf_conf entry per
 * virtual function supported by this adapter generation).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
592 
593 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
594 			   struct cudbg_buffer *dbg_buff,
595 			   struct cudbg_error *cudbg_err)
596 {
597 	struct adapter *padap = pdbg_init->adap;
598 	struct cudbg_buffer temp_buff = { 0 };
599 	int rc;
600 
601 	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
602 	if (rc)
603 		return rc;
604 
605 	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
606 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
607 	return rc;
608 }
609 
/* Collect PM TX and RX statistics (counts and cycle counters).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
630 
631 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
632 			   struct cudbg_buffer *dbg_buff,
633 			   struct cudbg_error *cudbg_err)
634 {
635 	struct adapter *padap = pdbg_init->adap;
636 	struct cudbg_buffer temp_buff = { 0 };
637 	struct cudbg_hw_sched *hw_sched_buff;
638 	int i, rc = 0;
639 
640 	if (!padap->params.vpd.cclk)
641 		return CUDBG_STATUS_CCLK_NOT_DEFINED;
642 
643 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
644 			    &temp_buff);
645 	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
646 	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
647 	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
648 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
649 	for (i = 0; i < NTX_SCHED; ++i)
650 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
651 				&hw_sched_buff->ipg[i], true);
652 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
653 	return rc;
654 }
655 
/* Collect TP indirect registers: the TP_PIO, TP_TM_PIO, and
 * TP_MIB_INDEX register groups, using the T5 or T6 address tables
 * depending on the chip.  Each table row holds IREG_NUM_ELEM values:
 * address register, data register, local offset, and offset range.
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total number of table rows across all three register groups */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
769 
/* Collect SGE debug indirect registers, one ireg_buf per entry of
 * t5_sge_dbg_index_array (two entries).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		/* Table row: addr reg, data reg, local offset, range */
		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
803 
/* Collect the ULP-RX logic-analyzer dump (ULPRX_LA_SIZE entries).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
824 
/* Collect the TP logic-analyzer dump: the current LA mode followed by
 * TPLA_SIZE 64-bit trace entries.
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
845 
/* Collect the CIM PIF logic-analyzer dump: 6 * CIM_PIFLA_SIZE words of
 * request data followed by 6 * CIM_PIFLA_SIZE words of response data.
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	/* Response half starts 6 * CIM_PIFLA_SIZE u32 words in (pointer
	 * arithmetic on u32 *, so the offset is in words, not bytes).
	 */
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
869 
/* Collect clock/timer information: the core clock period and the TP
 * timers (retransmit, persist, keepalive, SRTT, FIN_WAIT2) converted
 * from hardware ticks to microsecond-based values.
 *
 * Returns CUDBG_STATUS_CCLK_NOT_DEFINED when the core clock is unknown,
 * 0 on success, or a negative error from buffer allocation.
 */
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	/* Core clock period: cclk is in kHz, so 10^9 / cclk gives psec */
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	/* One TP timer tick in microseconds: period scaled by 2^tre */
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	/* Remaining timers: register value (in ticks) times tick length */
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
918 
/* Collect PCIe indirect registers: the PCIE_PDBG group followed by the
 * PCIE_CDBG group.  The buffer is sized for n entries of each group,
 * where n is the PDBG table length (presumably both tables are the same
 * length — confirm against cudbg_entity.h).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
975 
/* Collect PM indirect registers: the PM_RX group followed by the PM_TX
 * group.  The buffer is sized for n entries of each group, where n is
 * the PM_RX table length (presumably both tables are the same length —
 * confirm against cudbg_entity.h).
 *
 * Returns 0 on success or a negative error from buffer allocation.
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1032 
/* Collect the TID (connection/filter ID) region layout as a rev1
 * cudbg_tid_info_region record.  Offload TID ranges are queried from
 * firmware; the rest comes from registers and the driver's cached
 * tid_info.
 */
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	/* Fill in the versioned header; size excludes the header itself */
	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	/* Ask firmware for the ETHOFLD TID range (bounds are inclusive) */
	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc <  0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		/* T6 also exposes a high-priority filter region; query its
		 * TID range (inclusive bounds) from firmware.
		 */
		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	/* Remaining counts/bases come from the driver's cached tid_info */
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	/* Snapshot adapter flags, LE config, and the IPv4/IPv6
	 * active-count registers.
	 */
	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1117 
1118 int cudbg_dump_context_size(struct adapter *padap)
1119 {
1120 	u32 value, size;
1121 	u8 flq;
1122 
1123 	value = t4_read_reg(padap, SGE_FLM_CFG_A);
1124 
1125 	/* Get number of data freelist queues */
1126 	flq = HDRSTARTFLQ_G(value);
1127 	size = CUDBG_MAX_FL_QIDS >> flq;
1128 
1129 	/* Add extra space for congestion manager contexts.
1130 	 * The number of CONM contexts are same as number of freelist
1131 	 * queues.
1132 	 */
1133 	size += size;
1134 	return size * sizeof(struct cudbg_ch_cntxt);
1135 }
1136 
1137 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
1138 				enum ctxt_type ctype, u32 *data)
1139 {
1140 	struct adapter *padap = pdbg_init->adap;
1141 	int rc = -1;
1142 
1143 	/* Under heavy traffic, the SGE Queue contexts registers will be
1144 	 * frequently accessed by firmware.
1145 	 *
1146 	 * To avoid conflicts with firmware, always ask firmware to fetch
1147 	 * the SGE Queue contexts via mailbox. On failure, fallback to
1148 	 * accessing hardware registers directly.
1149 	 */
1150 	if (is_fw_attached(pdbg_init))
1151 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
1152 	if (rc)
1153 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
1154 }
1155 
/* Dump all SGE freelist (FLM) and congestion manager (CNM) contexts.
 * The buffer sized by cudbg_dump_context_size() always holds an even
 * number of cudbg_ch_cntxt records — one FLM/CNM pair per queue ID —
 * so the paired decrements below cannot underflow.
 */
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ch_cntxt *buff;
	u32 size, i = 0;
	int rc;

	rc = cudbg_dump_context_size(padap);
	if (rc <= 0)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	size = rc;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	/* Each iteration emits the FLM context followed by the matching
	 * CNM context for queue ID i.
	 */
	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
	while (size > 0) {
		buff->cntxt_type = CTXT_FLM;
		buff->cntxt_id = i;
		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
		buff++;
		size -= sizeof(struct cudbg_ch_cntxt);

		buff->cntxt_type = CTXT_CNM;
		buff->cntxt_id = i;
		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
		buff++;
		size -= sizeof(struct cudbg_ch_cntxt);

		i++;
	}

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1195 
1196 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1197 {
1198 	*mask = x | y;
1199 	y = (__force u64)cpu_to_be64(y);
1200 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
1201 }
1202 
1203 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
1204 				   struct fw_ldst_mps_rplc *mps_rplc)
1205 {
1206 	if (is_t5(padap->params.chip)) {
1207 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1208 							  MPS_VF_RPLCT_MAP3_A));
1209 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1210 							  MPS_VF_RPLCT_MAP2_A));
1211 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1212 							  MPS_VF_RPLCT_MAP1_A));
1213 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1214 							  MPS_VF_RPLCT_MAP0_A));
1215 	} else {
1216 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1217 							  MPS_VF_RPLCT_MAP7_A));
1218 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1219 							  MPS_VF_RPLCT_MAP6_A));
1220 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1221 							  MPS_VF_RPLCT_MAP5_A));
1222 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1223 							  MPS_VF_RPLCT_MAP4_A));
1224 	}
1225 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
1226 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
1227 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
1228 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
1229 }
1230 
/* Read one MPS TCAM entry at @idx into @tcam.  On T6 the TCAM is read
 * indirectly through MPS_CLS_TCAM_DATA2_CTL; older chips expose it
 * directly as 64-bit Y/X register pairs.
 */
static int cudbg_collect_tcam_index(struct adapter *padap,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy; indices 256 and up live in the second TCAM */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			/* Outer header: VLAN valid flag + inner VLAN id */
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		/* Pre-T6: X/Y planes are directly readable 64-bit regs */
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return (a bit set in both planes marks an unused
	 * entry here).
	 */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	/* The replication flag sits in a chip-specific bit of cls_lo */
	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		/* Fetch the replication map via the firmware LDST command;
		 * on mailbox failure fall back to direct register reads.
		 *
		 * NOTE(review): when the mailbox fails, the non-zero rc is
		 * still returned below even though the backdoor read
		 * filled mps_rplc — the caller then aborts the whole TCAM
		 * dump.  Confirm this is intended.
		 */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				&ldst_cmd);
		if (rc)
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
		else
			mps_rplc = ldst_cmd.u.mps.rplc;

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			/* Chips with a 256-bit replication map */
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}
1341 
1342 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1343 			   struct cudbg_buffer *dbg_buff,
1344 			   struct cudbg_error *cudbg_err)
1345 {
1346 	struct adapter *padap = pdbg_init->adap;
1347 	struct cudbg_buffer temp_buff = { 0 };
1348 	u32 size = 0, i, n, total_size = 0;
1349 	struct cudbg_mps_tcam *tcam;
1350 	int rc;
1351 
1352 	n = padap->params.arch.mps_tcam_size;
1353 	size = sizeof(struct cudbg_mps_tcam) * n;
1354 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1355 	if (rc)
1356 		return rc;
1357 
1358 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1359 	for (i = 0; i < n; i++) {
1360 		rc = cudbg_collect_tcam_index(padap, tcam, i);
1361 		if (rc) {
1362 			cudbg_err->sys_err = rc;
1363 			cudbg_put_buff(&temp_buff, dbg_buff);
1364 			return rc;
1365 		}
1366 		total_size += sizeof(struct cudbg_mps_tcam);
1367 		tcam++;
1368 	}
1369 
1370 	if (!total_size) {
1371 		rc = CUDBG_SYSTEM_ERROR;
1372 		cudbg_err->sys_err = rc;
1373 		cudbg_put_buff(&temp_buff, dbg_buff);
1374 		return rc;
1375 	}
1376 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1377 	return rc;
1378 }
1379 
/* Collect VPD (vital product data): serial/part numbers, MAC address,
 * and the serial-config, VPD, and firmware version numbers.
 */
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* Serial Configuration Version is located beyond the PF's vpd size.
	 * Temporarily give access to entire EEPROM to get it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	/* Keep the read status in 'ret' so the VPD size is restored
	 * before any read error is propagated.
	 */
	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	/* VPD version is stored as a numeric string (base auto-detected
	 * by kstrtouint); NUL-terminate before parsing.
	 */
	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	/* Copy the raw VPD strings and split the packed firmware version
	 * into its major/minor/micro/build components.
	 */
	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1447 
/* Read one LE (lookup engine) entry at @tid via the DBGI debug
 * interface.  Returns 0 on success or CUDBG_SYSTEM_ERROR if the
 * interface stays busy or reports a bad response status.
 */
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	/* Start the command */
	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit.
	 * NOTE(review): this is a tight busy-wait capped at 8 reads with
	 * no delay between them — confirm the hardware always completes
	 * within that window.
	 */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status; a clear bit 0 is treated as failure */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}
1492 
1493 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
1494 {
1495 	int type = LE_ET_UNKNOWN;
1496 
1497 	if (tid < tcam_region.server_start)
1498 		type = LE_ET_TCAM_CON;
1499 	else if (tid < tcam_region.filter_start)
1500 		type = LE_ET_TCAM_SERVER;
1501 	else if (tid < tcam_region.clip_start)
1502 		type = LE_ET_TCAM_FILTER;
1503 	else if (tid < tcam_region.routing_start)
1504 		type = LE_ET_TCAM_CLIP;
1505 	else if (tid < tcam_region.tid_hash_base)
1506 		type = LE_ET_TCAM_ROUTING;
1507 	else if (tid < tcam_region.max_tid)
1508 		type = LE_ET_HASH_CON;
1509 	else
1510 		type = LE_ET_INVALID_TID;
1511 
1512 	return type;
1513 }
1514 
1515 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
1516 			       struct cudbg_tcam tcam_region)
1517 {
1518 	int ipv6 = 0;
1519 	int le_type;
1520 
1521 	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
1522 	if (tid_data->tid & 1)
1523 		return 0;
1524 
1525 	if (le_type == LE_ET_HASH_CON) {
1526 		ipv6 = tid_data->data[16] & 0x8000;
1527 	} else if (le_type == LE_ET_TCAM_CON) {
1528 		ipv6 = tid_data->data[16] & 0x8000;
1529 		if (ipv6)
1530 			ipv6 = tid_data->data[9] == 0x00C00000;
1531 	} else {
1532 		ipv6 = 0;
1533 	}
1534 	return ipv6;
1535 }
1536 
1537 void cudbg_fill_le_tcam_info(struct adapter *padap,
1538 			     struct cudbg_tcam *tcam_region)
1539 {
1540 	u32 value;
1541 
1542 	/* Get the LE regions */
1543 	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
1544 	tcam_region->tid_hash_base = value;
1545 
1546 	/* Get routing table index */
1547 	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
1548 	tcam_region->routing_start = value;
1549 
1550 	/*Get clip table index */
1551 	value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
1552 	tcam_region->clip_start = value;
1553 
1554 	/* Get filter table index */
1555 	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
1556 	tcam_region->filter_start = value;
1557 
1558 	/* Get server table index */
1559 	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
1560 	tcam_region->server_start = value;
1561 
1562 	/* Check whether hash is enabled and calculate the max tids */
1563 	value = t4_read_reg(padap, LE_DB_CONFIG_A);
1564 	if ((value >> HASHEN_S) & 1) {
1565 		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
1566 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1567 			tcam_region->max_tid = (value & 0xFFFFF) +
1568 					       tcam_region->tid_hash_base;
1569 		} else {
1570 			value = HASHTIDSIZE_G(value);
1571 			value = 1 << value;
1572 			tcam_region->max_tid = value +
1573 					       tcam_region->tid_hash_base;
1574 		}
1575 	} else { /* hash not enabled */
1576 		tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
1577 	}
1578 }
1579 
1580 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
1581 			  struct cudbg_buffer *dbg_buff,
1582 			  struct cudbg_error *cudbg_err)
1583 {
1584 	struct adapter *padap = pdbg_init->adap;
1585 	struct cudbg_buffer temp_buff = { 0 };
1586 	struct cudbg_tcam tcam_region = { 0 };
1587 	struct cudbg_tid_data *tid_data;
1588 	u32 bytes = 0;
1589 	int rc, size;
1590 	u32 i;
1591 
1592 	cudbg_fill_le_tcam_info(padap, &tcam_region);
1593 
1594 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
1595 	size += sizeof(struct cudbg_tcam);
1596 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1597 	if (rc)
1598 		return rc;
1599 
1600 	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
1601 	bytes = sizeof(struct cudbg_tcam);
1602 	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
1603 	/* read all tid */
1604 	for (i = 0; i < tcam_region.max_tid; ) {
1605 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
1606 		if (rc) {
1607 			cudbg_err->sys_err = rc;
1608 			cudbg_put_buff(&temp_buff, dbg_buff);
1609 			return rc;
1610 		}
1611 
1612 		/* ipv6 takes two tids */
1613 		cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
1614 
1615 		tid_data++;
1616 		bytes += sizeof(struct cudbg_tid_data);
1617 	}
1618 
1619 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1620 	return rc;
1621 }
1622 
1623 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1624 			struct cudbg_buffer *dbg_buff,
1625 			struct cudbg_error *cudbg_err)
1626 {
1627 	struct adapter *padap = pdbg_init->adap;
1628 	struct cudbg_buffer temp_buff = { 0 };
1629 	u32 size;
1630 	int rc;
1631 
1632 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1633 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1634 	if (rc)
1635 		return rc;
1636 
1637 	t4_read_cong_tbl(padap, (void *)temp_buff.data);
1638 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1639 	return rc;
1640 }
1641 
1642 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1643 			      struct cudbg_buffer *dbg_buff,
1644 			      struct cudbg_error *cudbg_err)
1645 {
1646 	struct adapter *padap = pdbg_init->adap;
1647 	struct cudbg_buffer temp_buff = { 0 };
1648 	struct ireg_buf *ma_indr;
1649 	int i, rc, n;
1650 	u32 size, j;
1651 
1652 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1653 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1654 
1655 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1656 	size = sizeof(struct ireg_buf) * n * 2;
1657 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1658 	if (rc)
1659 		return rc;
1660 
1661 	ma_indr = (struct ireg_buf *)temp_buff.data;
1662 	for (i = 0; i < n; i++) {
1663 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1664 		u32 *buff = ma_indr->outbuf;
1665 
1666 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1667 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1668 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1669 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1670 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1671 				 buff, ma_fli->ireg_offset_range,
1672 				 ma_fli->ireg_local_offset);
1673 		ma_indr++;
1674 	}
1675 
1676 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1677 	for (i = 0; i < n; i++) {
1678 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1679 		u32 *buff = ma_indr->outbuf;
1680 
1681 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1682 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1683 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1684 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1685 			t4_read_indirect(padap, ma_fli->ireg_addr,
1686 					 ma_fli->ireg_data, buff, 1,
1687 					 ma_fli->ireg_local_offset);
1688 			buff++;
1689 			ma_fli->ireg_local_offset += 0x20;
1690 		}
1691 		ma_indr++;
1692 	}
1693 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1694 	return rc;
1695 }
1696 
/* Capture the ULP TX logic analyzer (LA) state: per-LA read/write
 * pointers plus a window of LA data words.
 */
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	/* Each LA's register block is 0x10 bytes apart */
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		/* The same RDDATA register is read repeatedly; presumably
		 * each read advances the LA to the next entry — confirm
		 * against the hardware documentation.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1731 
/* Dump the UP CIM indirect registers.  The register table differs
 * between T5 and T6 chips.
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		/* NOTE(review): on a chip that is neither T5 nor T6 the
		 * ireg fields are left unset and t4_cim_read() runs with
		 * whatever the buffer held — presumably this entity is
		 * only collected on T5/T6; confirm.
		 */
		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1780 
1781 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
1782 			     struct cudbg_buffer *dbg_buff,
1783 			     struct cudbg_error *cudbg_err)
1784 {
1785 	struct adapter *padap = pdbg_init->adap;
1786 	struct cudbg_buffer temp_buff = { 0 };
1787 	struct cudbg_pbt_tables *pbt;
1788 	int i, rc;
1789 	u32 addr;
1790 
1791 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
1792 			    &temp_buff);
1793 	if (rc)
1794 		return rc;
1795 
1796 	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
1797 	/* PBT dynamic entries */
1798 	addr = CUDBG_CHAC_PBT_ADDR;
1799 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
1800 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1801 				 &pbt->pbt_dynamic[i]);
1802 		if (rc) {
1803 			cudbg_err->sys_err = rc;
1804 			cudbg_put_buff(&temp_buff, dbg_buff);
1805 			return rc;
1806 		}
1807 	}
1808 
1809 	/* PBT static entries */
1810 	/* static entries start when bit 6 is set */
1811 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
1812 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
1813 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1814 				 &pbt->pbt_static[i]);
1815 		if (rc) {
1816 			cudbg_err->sys_err = rc;
1817 			cudbg_put_buff(&temp_buff, dbg_buff);
1818 			return rc;
1819 		}
1820 	}
1821 
1822 	/* LRF entries */
1823 	addr = CUDBG_CHAC_PBT_LRF;
1824 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
1825 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1826 				 &pbt->lrf_table[i]);
1827 		if (rc) {
1828 			cudbg_err->sys_err = rc;
1829 			cudbg_put_buff(&temp_buff, dbg_buff);
1830 			return rc;
1831 		}
1832 	}
1833 
1834 	/* PBT data entries */
1835 	addr = CUDBG_CHAC_PBT_DATA;
1836 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
1837 		rc = t4_cim_read(padap, addr + (i * 4), 1,
1838 				 &pbt->pbt_data[i]);
1839 		if (rc) {
1840 			cudbg_err->sys_err = rc;
1841 			cudbg_put_buff(&temp_buff, dbg_buff);
1842 			return rc;
1843 		}
1844 	}
1845 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1846 	return rc;
1847 }
1848 
1849 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1850 			   struct cudbg_buffer *dbg_buff,
1851 			   struct cudbg_error *cudbg_err)
1852 {
1853 	struct adapter *padap = pdbg_init->adap;
1854 	struct cudbg_mbox_log *mboxlog = NULL;
1855 	struct cudbg_buffer temp_buff = { 0 };
1856 	struct mbox_cmd_log *log = NULL;
1857 	struct mbox_cmd *entry;
1858 	unsigned int entry_idx;
1859 	u16 mbox_cmds;
1860 	int i, k, rc;
1861 	u64 flit;
1862 	u32 size;
1863 
1864 	log = padap->mbox_log;
1865 	mbox_cmds = padap->mbox_log->size;
1866 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1867 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1868 	if (rc)
1869 		return rc;
1870 
1871 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1872 	for (k = 0; k < mbox_cmds; k++) {
1873 		entry_idx = log->cursor + k;
1874 		if (entry_idx >= log->size)
1875 			entry_idx -= log->size;
1876 
1877 		entry = mbox_cmd_log_entry(log, entry_idx);
1878 		/* skip over unused entries */
1879 		if (entry->timestamp == 0)
1880 			continue;
1881 
1882 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1883 		for (i = 0; i < MBOX_LEN / 8; i++) {
1884 			flit = entry->cmd[i];
1885 			mboxlog->hi[i] = (u32)(flit >> 32);
1886 			mboxlog->lo[i] = (u32)flit;
1887 		}
1888 		mboxlog++;
1889 	}
1890 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1891 	return rc;
1892 }
1893 
1894 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1895 			       struct cudbg_buffer *dbg_buff,
1896 			       struct cudbg_error *cudbg_err)
1897 {
1898 	struct adapter *padap = pdbg_init->adap;
1899 	struct cudbg_buffer temp_buff = { 0 };
1900 	struct ireg_buf *hma_indr;
1901 	int i, rc, n;
1902 	u32 size;
1903 
1904 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1905 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1906 
1907 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1908 	size = sizeof(struct ireg_buf) * n;
1909 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1910 	if (rc)
1911 		return rc;
1912 
1913 	hma_indr = (struct ireg_buf *)temp_buff.data;
1914 	for (i = 0; i < n; i++) {
1915 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
1916 		u32 *buff = hma_indr->outbuf;
1917 
1918 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1919 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1920 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1921 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1922 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1923 				 buff, hma_fli->ireg_offset_range,
1924 				 hma_fli->ireg_local_offset);
1925 		hma_indr++;
1926 	}
1927 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1928 	return rc;
1929 }
1930