1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Commit the scratch buffer @pin_buff into the main debug buffer
 * @dbg_buff (cudbg_update_buff) and then release the scratch space
 * back to @dbg_buff (cudbg_put_buff).
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
72 			   struct cudbg_buffer *dbg_buff,
73 			   struct cudbg_error *cudbg_err)
74 {
75 	struct adapter *padap = pdbg_init->adap;
76 	struct cudbg_buffer temp_buff = { 0 };
77 	u32 buf_size = 0;
78 	int rc = 0;
79 
80 	if (is_t4(padap->params.chip))
81 		buf_size = T4_REGMAP_SIZE;
82 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
83 		buf_size = T5_REGMAP_SIZE;
84 
85 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
86 	if (rc)
87 		return rc;
88 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
89 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
90 	return rc;
91 }
92 
/* Dump the firmware device log out of adapter memory. */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	/* Refresh the cached devlog parameters (memtype/start/size) before
	 * reading the log from adapter memory.
	 */
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog; a zero start address means there is no log to
	 * read, in which case the (already sized) buffer is emitted as-is.
	 */
	if (dparams->start != 0) {
		/* Memory-window accesses are serialized with win0_lock. */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
131 
/* Dump the CIM logic analyzer.  The dump layout is the LA config
 * register value followed by the raw capture data.
 */
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	/* Size the capture area: on T6 the data is emitted as rows of
	 * 11 u32s; pre-T6 entries are 8 u32s each.
	 */
	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	/* One extra word for the LA config snapshot at the front */
	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	/* Layout: cfg word first, then the raw LA data */
	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
173 
/* Dump the CIM MA logic analyzer (MA request and MA response halves). */
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	/* Two halves, each sized 5 * CIM_MALA_SIZE 32-bit words */
	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	/* NOTE(review): the second pointer is offset by 5 * CIM_MALA_SIZE
	 * *bytes*, while the halves above are sized in u32 words -- confirm
	 * whether the offset should be 5 * CIM_MALA_SIZE * sizeof(u32).
	 * Changing it would alter the dump layout the parser expects.
	 */
	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
194 
/* Dump the CIM queue configuration: IBQ/OBQ pointer state plus the
 * base/size/threshold settings of all CIM queues.
 */
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;

	/* IBQ state words starting at UP_IBQ_0_RDADDR */
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	/* OBQ write pointers starting at UP_OBQ_0_REALADDR */
	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	/* Base addresses, sizes and thresholds of the CIM queues */
	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
233 
234 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
235 			      struct cudbg_buffer *dbg_buff,
236 			      struct cudbg_error *cudbg_err, int qid)
237 {
238 	struct adapter *padap = pdbg_init->adap;
239 	struct cudbg_buffer temp_buff = { 0 };
240 	int no_of_read_words, rc = 0;
241 	u32 qsize;
242 
243 	/* collect CIM IBQ */
244 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
245 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
246 	if (rc)
247 		return rc;
248 
249 	/* t4_read_cim_ibq will return no. of read words or error */
250 	no_of_read_words = t4_read_cim_ibq(padap, qid,
251 					   (u32 *)temp_buff.data, qsize);
252 	/* no_of_read_words is less than or equal to 0 means error */
253 	if (no_of_read_words <= 0) {
254 		if (!no_of_read_words)
255 			rc = CUDBG_SYSTEM_ERROR;
256 		else
257 			rc = no_of_read_words;
258 		cudbg_err->sys_err = rc;
259 		cudbg_put_buff(&temp_buff, dbg_buff);
260 		return rc;
261 	}
262 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
263 	return rc;
264 }
265 
/* Dump CIM inbound queue 0 (TP0). */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
272 
/* Dump CIM inbound queue 1 (TP1). */
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
279 
/* Dump CIM inbound queue 2 (ULP). */
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
286 
/* Dump CIM inbound queue 3 (SGE0). */
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
293 
/* Dump CIM inbound queue 4 (SGE1). */
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
300 
/* Dump CIM inbound queue 5 (NC-SI). */
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
307 
308 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
309 {
310 	u32 value;
311 
312 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
313 		     QUENUMSELECT_V(qid));
314 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
315 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
316 	return value * sizeof(u32);
317 }
318 
319 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
320 			      struct cudbg_buffer *dbg_buff,
321 			      struct cudbg_error *cudbg_err, int qid)
322 {
323 	struct adapter *padap = pdbg_init->adap;
324 	struct cudbg_buffer temp_buff = { 0 };
325 	int no_of_read_words, rc = 0;
326 	u32 qsize;
327 
328 	/* collect CIM OBQ */
329 	qsize =  cudbg_cim_obq_size(padap, qid);
330 	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
331 	if (rc)
332 		return rc;
333 
334 	/* t4_read_cim_obq will return no. of read words or error */
335 	no_of_read_words = t4_read_cim_obq(padap, qid,
336 					   (u32 *)temp_buff.data, qsize);
337 	/* no_of_read_words is less than or equal to 0 means error */
338 	if (no_of_read_words <= 0) {
339 		if (!no_of_read_words)
340 			rc = CUDBG_SYSTEM_ERROR;
341 		else
342 			rc = no_of_read_words;
343 		cudbg_err->sys_err = rc;
344 		cudbg_put_buff(&temp_buff, dbg_buff);
345 		return rc;
346 	}
347 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
348 	return rc;
349 }
350 
/* Dump CIM outbound queue 0 (ULP0). */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}
357 
/* Dump CIM outbound queue 1 (ULP1). */
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}
364 
/* Dump CIM outbound queue 2 (ULP2). */
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}
371 
/* Dump CIM outbound queue 3 (ULP3). */
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}
378 
/* Dump CIM outbound queue 4 (SGE). */
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}
385 
/* Dump CIM outbound queue 5 (NC-SI). */
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}
392 
/* Dump CIM outbound queue 6 (SGE RX queue 0). */
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}
399 
/* Dump CIM outbound queue 7 (SGE RX queue 1). */
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
406 
/* Copy @tot_len bytes of adapter memory of type @mem_type into the debug
 * buffer, in pieces of at most CUDBG_CHUNK_SIZE so that only one
 * chunk-sized scratch buffer is live at a time.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		/* Memory-window accesses are serialized with win0_lock. */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		/* Commit each chunk before allocating the next one */
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
441 
442 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
443 				   struct card_mem *mem_info)
444 {
445 	struct adapter *padap = pdbg_init->adap;
446 	u32 value;
447 
448 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
449 	value = EDRAM0_SIZE_G(value);
450 	mem_info->size_edc0 = (u16)value;
451 
452 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
453 	value = EDRAM1_SIZE_G(value);
454 	mem_info->size_edc1 = (u16)value;
455 
456 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
457 	if (value & EDRAM0_ENABLE_F)
458 		mem_info->mem_flag |= (1 << EDC0_FLAG);
459 	if (value & EDRAM1_ENABLE_F)
460 		mem_info->mem_flag |= (1 << EDC1_FLAG);
461 }
462 
463 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
464 			     struct cudbg_error *cudbg_err)
465 {
466 	struct adapter *padap = pdbg_init->adap;
467 	int rc;
468 
469 	if (is_fw_attached(pdbg_init)) {
470 		/* Flush uP dcache before reading edcX/mcX  */
471 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
472 		if (rc)
473 			cudbg_err->sys_warn = rc;
474 	}
475 }
476 
477 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
478 				    struct cudbg_buffer *dbg_buff,
479 				    struct cudbg_error *cudbg_err,
480 				    u8 mem_type)
481 {
482 	struct card_mem mem_info = {0};
483 	unsigned long flag, size;
484 	int rc;
485 
486 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
487 	cudbg_collect_mem_info(pdbg_init, &mem_info);
488 	switch (mem_type) {
489 	case MEM_EDC0:
490 		flag = (1 << EDC0_FLAG);
491 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
492 		break;
493 	case MEM_EDC1:
494 		flag = (1 << EDC1_FLAG);
495 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
496 		break;
497 	default:
498 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
499 		goto err;
500 	}
501 
502 	if (mem_info.mem_flag & flag) {
503 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
504 				       size, cudbg_err);
505 		if (rc)
506 			goto err;
507 	} else {
508 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
509 		goto err;
510 	}
511 err:
512 	return rc;
513 }
514 
/* Dump the EDC0 memory region. */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}
522 
/* Dump the EDC1 memory region. */
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
530 
531 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
532 		      struct cudbg_buffer *dbg_buff,
533 		      struct cudbg_error *cudbg_err)
534 {
535 	struct adapter *padap = pdbg_init->adap;
536 	struct cudbg_buffer temp_buff = { 0 };
537 	int rc;
538 
539 	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
540 	if (rc)
541 		return rc;
542 
543 	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
544 	if (rc) {
545 		cudbg_err->sys_err = rc;
546 		cudbg_put_buff(&temp_buff, dbg_buff);
547 		return rc;
548 	}
549 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
550 	return rc;
551 }
552 
553 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
554 				struct cudbg_buffer *dbg_buff,
555 				struct cudbg_error *cudbg_err)
556 {
557 	struct adapter *padap = pdbg_init->adap;
558 	struct cudbg_buffer temp_buff = { 0 };
559 	struct cudbg_rss_vf_conf *vfconf;
560 	int vf, rc, vf_count;
561 
562 	vf_count = padap->params.arch.vfcount;
563 	rc = cudbg_get_buff(dbg_buff,
564 			    vf_count * sizeof(struct cudbg_rss_vf_conf),
565 			    &temp_buff);
566 	if (rc)
567 		return rc;
568 
569 	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
570 	for (vf = 0; vf < vf_count; vf++)
571 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
572 				      &vfconf[vf].rss_vf_vfh, true);
573 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
574 	return rc;
575 }
576 
/* Dump TP indirect registers as a sequence of ireg_buf records (register
 * metadata plus read-back values) for three groups: TP_PIO, TP_TM_PIO
 * and TP_MIB_INDEX.  The register lists differ between T5 and T6.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total record count: all three groups for this chip generation */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		/* Record addr/data/offset/range metadata, then read the
		 * register values into outbuf.
		 */
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
690 
691 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
692 			       struct cudbg_buffer *dbg_buff,
693 			       struct cudbg_error *cudbg_err)
694 {
695 	struct adapter *padap = pdbg_init->adap;
696 	struct cudbg_buffer temp_buff = { 0 };
697 	struct ireg_buf *ch_sge_dbg;
698 	int i, rc;
699 
700 	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
701 	if (rc)
702 		return rc;
703 
704 	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
705 	for (i = 0; i < 2; i++) {
706 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
707 		u32 *buff = ch_sge_dbg->outbuf;
708 
709 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
710 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
711 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
712 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
713 		t4_read_indirect(padap,
714 				 sge_pio->ireg_addr,
715 				 sge_pio->ireg_data,
716 				 buff,
717 				 sge_pio->ireg_offset_range,
718 				 sge_pio->ireg_local_offset);
719 		ch_sge_dbg++;
720 	}
721 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
722 	return rc;
723 }
724 
725 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
726 			   struct cudbg_buffer *dbg_buff,
727 			   struct cudbg_error *cudbg_err)
728 {
729 	struct adapter *padap = pdbg_init->adap;
730 	struct cudbg_buffer temp_buff = { 0 };
731 	struct cudbg_ulprx_la *ulprx_la_buff;
732 	int rc;
733 
734 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
735 			    &temp_buff);
736 	if (rc)
737 		return rc;
738 
739 	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
740 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
741 	ulprx_la_buff->size = ULPRX_LA_SIZE;
742 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
743 	return rc;
744 }
745 
746 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
747 			struct cudbg_buffer *dbg_buff,
748 			struct cudbg_error *cudbg_err)
749 {
750 	struct adapter *padap = pdbg_init->adap;
751 	struct cudbg_buffer temp_buff = { 0 };
752 	struct cudbg_tp_la *tp_la_buff;
753 	int size, rc;
754 
755 	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
756 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
757 	if (rc)
758 		return rc;
759 
760 	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
761 	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
762 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
763 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
764 	return rc;
765 }
766 
/* Dump the CIM PIF logic analyzer: header followed by two capture halves
 * of CIM_PIFLA_SIZE entries, each entry 6 u32s wide.
 */
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	/* First half of data[] receives one capture; the second half
	 * (offset 6 * CIM_PIFLA_SIZE u32s) receives the other.
	 */
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
790 
/* Dump PCIE indirect registers: the PCIE_PDBG group followed by the
 * PCIE_CDBG group, each emitted as ireg_buf records (register metadata
 * plus read-back values).  The buffer is sized assuming both groups
 * have the same record count (n * 2).
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		/* Record addr/data/offset/range metadata, then read */
		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
847 
/* Dump PM indirect registers: the PM_RX group followed by the PM_TX
 * group, each emitted as ireg_buf records (register metadata plus
 * read-back values).  The buffer is sized assuming both groups have the
 * same record count (n * 2).
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		/* Record addr/data/offset/range metadata, then read */
		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
904 
/* Collect TID (connection/filter ID) range and usage information:
 * ranges queried from firmware, values cached in the driver's tid_info,
 * and LE (lookup engine) register state.
 */
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	/* Versioned header so the dump parser can detect the rev1 layout */
	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	/* Query the ETHOFLD TID range from firmware */
	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc <  0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		/* T6 additionally exposes the active-table start index and
		 * the high-priority filter TID range.
		 */
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	/* Remaining ranges come from the driver's cached tid_info */
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	/* LE configuration and current IPv4/IPv6 active-entry counts */
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
989 
/* Convert an MPS TCAM (x, y) pair into a MAC address and mask.  The mask
 * is the OR of x and y; the address bytes are taken from the big-endian
 * representation of y, skipping its two most significant bytes (a 48-bit
 * MAC stored in the low bits of a 64-bit word).
 */
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}
996 
/* Build a firmware-LDST-style MPS replication-map reply by reading the
 * MPS_VF_RPLCT_MAP registers directly (backdoor) instead of issuing an
 * LDST mailbox command.  T5 and T6 map the upper 128 bits to different
 * register sets.
 */
static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		/* NOTE(review): on T5 the upper 128 bits read MAP0-MAP3,
		 * the same registers read again below for the lower 128
		 * bits -- confirm this is the intended T5 layout.
		 */
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		/* T6: upper 128 bits live in MAP4-MAP7 */
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	/* Lower 128 bits come from MAP0-MAP3 on both generations */
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
1024 
/* Read one MPS TCAM entry (Y/X patterns, classification SRAM and, when
 * replication is enabled, the replication map) into @tcam.
 *
 * Returns 0 on success -- including the "empty entry" case, where only
 * tcamx/tcamy are filled in -- or a negative error from the mailbox
 * path when fetching the replication map fails is not possible (a
 * mailbox failure falls back to direct register reads instead).
 */
static int cudbg_collect_tcam_index(struct adapter *padap,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* T6+: the TCAM is accessed indirectly through the
		 * MPS_CLS_TCAM_DATA2_CTL register.
		 *
		 * CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		/* entries 256 and up live in the second TCAM */
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		/* upper 16 MAC bits from RDATA1, lower 32 from RDATA0 */
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			/* outer lookup: the same bits carry VLAN info */
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		/* pre-T6 chips expose the TCAM as directly readable
		 * 64-bit registers
		 */
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	/* the replicate flag lives in different cls_lo bits per chip */
	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		/* ask the firmware for the replication map; if the mailbox
		 * fails, fall back to reading the registers directly
		 */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				&ldst_cmd);
		if (rc)
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
		else
			mps_rplc = ldst_cmd.u.mps.rplc;

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		/* NOTE(review): chips whose replication map exceeds
		 * CUDBG_MAX_RPLC_SIZE words also carry the upper half --
		 * presumably a 256-bit map; confirm against the arch params.
		 */
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}
1135 
1136 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1137 			   struct cudbg_buffer *dbg_buff,
1138 			   struct cudbg_error *cudbg_err)
1139 {
1140 	struct adapter *padap = pdbg_init->adap;
1141 	struct cudbg_buffer temp_buff = { 0 };
1142 	u32 size = 0, i, n, total_size = 0;
1143 	struct cudbg_mps_tcam *tcam;
1144 	int rc;
1145 
1146 	n = padap->params.arch.mps_tcam_size;
1147 	size = sizeof(struct cudbg_mps_tcam) * n;
1148 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1149 	if (rc)
1150 		return rc;
1151 
1152 	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1153 	for (i = 0; i < n; i++) {
1154 		rc = cudbg_collect_tcam_index(padap, tcam, i);
1155 		if (rc) {
1156 			cudbg_err->sys_err = rc;
1157 			cudbg_put_buff(&temp_buff, dbg_buff);
1158 			return rc;
1159 		}
1160 		total_size += sizeof(struct cudbg_mps_tcam);
1161 		tcam++;
1162 	}
1163 
1164 	if (!total_size) {
1165 		rc = CUDBG_SYSTEM_ERROR;
1166 		cudbg_err->sys_err = rc;
1167 		cudbg_put_buff(&temp_buff, dbg_buff);
1168 		return rc;
1169 	}
1170 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1171 	return rc;
1172 }
1173 
1174 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1175 			      struct cudbg_buffer *dbg_buff,
1176 			      struct cudbg_error *cudbg_err)
1177 {
1178 	struct adapter *padap = pdbg_init->adap;
1179 	struct cudbg_buffer temp_buff = { 0 };
1180 	struct ireg_buf *ma_indr;
1181 	int i, rc, n;
1182 	u32 size, j;
1183 
1184 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1185 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1186 
1187 	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1188 	size = sizeof(struct ireg_buf) * n * 2;
1189 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1190 	if (rc)
1191 		return rc;
1192 
1193 	ma_indr = (struct ireg_buf *)temp_buff.data;
1194 	for (i = 0; i < n; i++) {
1195 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1196 		u32 *buff = ma_indr->outbuf;
1197 
1198 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1199 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1200 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1201 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1202 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1203 				 buff, ma_fli->ireg_offset_range,
1204 				 ma_fli->ireg_local_offset);
1205 		ma_indr++;
1206 	}
1207 
1208 	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1209 	for (i = 0; i < n; i++) {
1210 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
1211 		u32 *buff = ma_indr->outbuf;
1212 
1213 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1214 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1215 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1216 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1217 			t4_read_indirect(padap, ma_fli->ireg_addr,
1218 					 ma_fli->ireg_data, buff, 1,
1219 					 ma_fli->ireg_local_offset);
1220 			buff++;
1221 			ma_fli->ireg_local_offset += 0x20;
1222 		}
1223 		ma_indr++;
1224 	}
1225 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1226 	return rc;
1227 }
1228 
/* Snapshot the ULP-TX logic analyzer state: read/write pointers plus
 * the captured data words for each of the CUDBG_NUM_ULPTX channels.
 */
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	/* each channel's register set is spaced 0x10 apart */
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		/* NOTE(review): RDDATA is read CUDBG_NUM_ULPTX_READ times
		 * from the SAME address -- presumably each read pops the
		 * next word of the LA capture FIFO; confirm before
		 * restructuring this loop.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1263 
1264 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
1265 				  struct cudbg_buffer *dbg_buff,
1266 				  struct cudbg_error *cudbg_err)
1267 {
1268 	struct adapter *padap = pdbg_init->adap;
1269 	struct cudbg_buffer temp_buff = { 0 };
1270 	struct ireg_buf *up_cim;
1271 	int i, rc, n;
1272 	u32 size;
1273 
1274 	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
1275 	size = sizeof(struct ireg_buf) * n;
1276 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1277 	if (rc)
1278 		return rc;
1279 
1280 	up_cim = (struct ireg_buf *)temp_buff.data;
1281 	for (i = 0; i < n; i++) {
1282 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
1283 		u32 *buff = up_cim->outbuf;
1284 
1285 		if (is_t5(padap->params.chip)) {
1286 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
1287 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
1288 			up_cim_reg->ireg_local_offset =
1289 						t5_up_cim_reg_array[i][2];
1290 			up_cim_reg->ireg_offset_range =
1291 						t5_up_cim_reg_array[i][3];
1292 		} else if (is_t6(padap->params.chip)) {
1293 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
1294 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
1295 			up_cim_reg->ireg_local_offset =
1296 						t6_up_cim_reg_array[i][2];
1297 			up_cim_reg->ireg_offset_range =
1298 						t6_up_cim_reg_array[i][3];
1299 		}
1300 
1301 		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
1302 				 up_cim_reg->ireg_offset_range, buff);
1303 		if (rc) {
1304 			cudbg_put_buff(&temp_buff, dbg_buff);
1305 			return rc;
1306 		}
1307 		up_cim++;
1308 	}
1309 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1310 	return rc;
1311 }
1312 
1313 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1314 			   struct cudbg_buffer *dbg_buff,
1315 			   struct cudbg_error *cudbg_err)
1316 {
1317 	struct adapter *padap = pdbg_init->adap;
1318 	struct cudbg_mbox_log *mboxlog = NULL;
1319 	struct cudbg_buffer temp_buff = { 0 };
1320 	struct mbox_cmd_log *log = NULL;
1321 	struct mbox_cmd *entry;
1322 	unsigned int entry_idx;
1323 	u16 mbox_cmds;
1324 	int i, k, rc;
1325 	u64 flit;
1326 	u32 size;
1327 
1328 	log = padap->mbox_log;
1329 	mbox_cmds = padap->mbox_log->size;
1330 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1331 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1332 	if (rc)
1333 		return rc;
1334 
1335 	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1336 	for (k = 0; k < mbox_cmds; k++) {
1337 		entry_idx = log->cursor + k;
1338 		if (entry_idx >= log->size)
1339 			entry_idx -= log->size;
1340 
1341 		entry = mbox_cmd_log_entry(log, entry_idx);
1342 		/* skip over unused entries */
1343 		if (entry->timestamp == 0)
1344 			continue;
1345 
1346 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1347 		for (i = 0; i < MBOX_LEN / 8; i++) {
1348 			flit = entry->cmd[i];
1349 			mboxlog->hi[i] = (u32)(flit >> 32);
1350 			mboxlog->lo[i] = (u32)flit;
1351 		}
1352 		mboxlog++;
1353 	}
1354 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1355 	return rc;
1356 }
1357 
1358 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1359 			       struct cudbg_buffer *dbg_buff,
1360 			       struct cudbg_error *cudbg_err)
1361 {
1362 	struct adapter *padap = pdbg_init->adap;
1363 	struct cudbg_buffer temp_buff = { 0 };
1364 	struct ireg_buf *hma_indr;
1365 	int i, rc, n;
1366 	u32 size;
1367 
1368 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1369 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1370 
1371 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1372 	size = sizeof(struct ireg_buf) * n;
1373 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1374 	if (rc)
1375 		return rc;
1376 
1377 	hma_indr = (struct ireg_buf *)temp_buff.data;
1378 	for (i = 0; i < n; i++) {
1379 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
1380 		u32 *buff = hma_indr->outbuf;
1381 
1382 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1383 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1384 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1385 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1386 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1387 				 buff, hma_fli->ireg_offset_range,
1388 				 hma_fli->ireg_local_offset);
1389 		hma_indr++;
1390 	}
1391 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1392 	return rc;
1393 }
1394