1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Commit the collected data in @pin_buff to the main debug buffer and
 * then release the scratch buffer.  Must be called in this order:
 * update first, then put.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
72 			   struct cudbg_buffer *dbg_buff,
73 			   struct cudbg_error *cudbg_err)
74 {
75 	struct adapter *padap = pdbg_init->adap;
76 	struct cudbg_buffer temp_buff = { 0 };
77 	u32 buf_size = 0;
78 	int rc = 0;
79 
80 	if (is_t4(padap->params.chip))
81 		buf_size = T4_REGMAP_SIZE;
82 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
83 		buf_size = T5_REGMAP_SIZE;
84 
85 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
86 	if (rc)
87 		return rc;
88 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
89 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
90 	return rc;
91 }
92 
/* Collect the firmware device log.  Refreshes the devlog parameters
 * from the adapter, then copies the log region out of adapter memory.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	/* Re-read devlog location/size so we copy from the right place */
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		/* Memory-window accesses are serialized by win0_lock */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
131 
/* Collect the CIM logic-analyzer trace.  Output layout: the LA config
 * register value first, followed by the LA data words.
 */
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	/* Size per the chip's LA output format: 11 words per group of 10
	 * entries on T6, 8 words per group of 8 entries on earlier chips
	 * (mirrors t4_cim_read_la()'s layout).
	 */
	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	/* Extra room for the config word stored ahead of the data */
	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
173 
174 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
175 			    struct cudbg_buffer *dbg_buff,
176 			    struct cudbg_error *cudbg_err)
177 {
178 	struct adapter *padap = pdbg_init->adap;
179 	struct cudbg_buffer temp_buff = { 0 };
180 	int size, rc;
181 
182 	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
183 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
184 	if (rc)
185 		return rc;
186 
187 	t4_cim_read_ma_la(padap,
188 			  (u32 *)temp_buff.data,
189 			  (u32 *)((char *)temp_buff.data +
190 				  5 * CIM_MALA_SIZE));
191 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
192 	return rc;
193 }
194 
/* Collect one CIM inbound queue (IBQ) into the debug buffer.
 * @qid: IBQ index, passed through to t4_read_cim_ibq().
 */
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ; all IBQs have the same fixed size */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		/* zero words read is mapped to a generic system error */
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
226 
/* Collect CIM IBQ 0 (TP queue 0). */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
233 
/* Collect CIM IBQ 1 (TP queue 1). */
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
240 
/* Collect CIM IBQ 2 (ULP). */
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
247 
/* Collect CIM IBQ 3 (SGE queue 0). */
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
254 
/* Collect CIM IBQ 4 (SGE queue 1). */
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
261 
/* Collect CIM IBQ 5 (NC-SI). */
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
268 
269 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
270 {
271 	u32 value;
272 
273 	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
274 		     QUENUMSELECT_V(qid));
275 	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
276 	value = CIMQSIZE_G(value) * 64; /* size in number of words */
277 	return value * sizeof(u32);
278 }
279 
/* Collect one CIM outbound queue (OBQ) into the debug buffer.
 * @qid: OBQ index, used both to size the buffer and for the read.
 */
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ; size is per-queue, read from hardware */
	qsize =  cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		/* zero words read is mapped to a generic system error */
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
311 
/* Collect CIM OBQ 0 (ULP channel 0). */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}
318 
/* Collect CIM OBQ 1 (ULP channel 1). */
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}
325 
/* Collect CIM OBQ 2 (ULP channel 2). */
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}
332 
/* Collect CIM OBQ 3 (ULP channel 3). */
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}
339 
/* Collect CIM OBQ 4 (SGE). */
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}
346 
/* Collect CIM OBQ 5 (NC-SI). */
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}
353 
/* Collect CIM OBQ 6 (SGE RX queue 0). */
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}
360 
/* Collect CIM OBQ 7 (SGE RX queue 1). */
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
367 
/* Copy @tot_len bytes of adapter memory of type @mem_type into the
 * debug buffer, one CUDBG_CHUNK_SIZE piece at a time so only a single
 * chunk of scratch space is held at once.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		/* Memory-window accesses are serialized by win0_lock */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		/* Commit this chunk before grabbing the next one */
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
402 
403 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
404 				   struct card_mem *mem_info)
405 {
406 	struct adapter *padap = pdbg_init->adap;
407 	u32 value;
408 
409 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
410 	value = EDRAM0_SIZE_G(value);
411 	mem_info->size_edc0 = (u16)value;
412 
413 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
414 	value = EDRAM1_SIZE_G(value);
415 	mem_info->size_edc1 = (u16)value;
416 
417 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
418 	if (value & EDRAM0_ENABLE_F)
419 		mem_info->mem_flag |= (1 << EDC0_FLAG);
420 	if (value & EDRAM1_ENABLE_F)
421 		mem_info->mem_flag |= (1 << EDC1_FLAG);
422 }
423 
424 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
425 			     struct cudbg_error *cudbg_err)
426 {
427 	struct adapter *padap = pdbg_init->adap;
428 	int rc;
429 
430 	if (is_fw_attached(pdbg_init)) {
431 		/* Flush uP dcache before reading edcX/mcX  */
432 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
433 		if (rc)
434 			cudbg_err->sys_warn = rc;
435 	}
436 }
437 
438 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
439 				    struct cudbg_buffer *dbg_buff,
440 				    struct cudbg_error *cudbg_err,
441 				    u8 mem_type)
442 {
443 	struct card_mem mem_info = {0};
444 	unsigned long flag, size;
445 	int rc;
446 
447 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
448 	cudbg_collect_mem_info(pdbg_init, &mem_info);
449 	switch (mem_type) {
450 	case MEM_EDC0:
451 		flag = (1 << EDC0_FLAG);
452 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
453 		break;
454 	case MEM_EDC1:
455 		flag = (1 << EDC1_FLAG);
456 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
457 		break;
458 	default:
459 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
460 		goto err;
461 	}
462 
463 	if (mem_info.mem_flag & flag) {
464 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
465 				       size, cudbg_err);
466 		if (rc)
467 			goto err;
468 	} else {
469 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
470 		goto err;
471 	}
472 err:
473 	return rc;
474 }
475 
/* Collect the contents of EDC0 memory. */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}
483 
/* Collect the contents of EDC1 memory. */
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}
491 
/* Collect TP indirect registers: TP_PIO, TP_TM_PIO and TP_MIB_INDEX
 * register groups, using the T5 or T6 register tables depending on the
 * chip.  Each table entry produces one struct ireg_buf in the output.
 * NOTE(review): chips other than T5/T6 fall into the T6 sizing path
 * but skip the per-entry setup below — presumably this entity is only
 * invoked on T5+; confirm against the callers.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total number of table entries across all three groups */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		/* Table entry layout: addr, data, local offset, range */
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
605 
606 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
607 			       struct cudbg_buffer *dbg_buff,
608 			       struct cudbg_error *cudbg_err)
609 {
610 	struct adapter *padap = pdbg_init->adap;
611 	struct cudbg_buffer temp_buff = { 0 };
612 	struct ireg_buf *ch_sge_dbg;
613 	int i, rc;
614 
615 	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
616 	if (rc)
617 		return rc;
618 
619 	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
620 	for (i = 0; i < 2; i++) {
621 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
622 		u32 *buff = ch_sge_dbg->outbuf;
623 
624 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
625 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
626 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
627 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
628 		t4_read_indirect(padap,
629 				 sge_pio->ireg_addr,
630 				 sge_pio->ireg_data,
631 				 buff,
632 				 sge_pio->ireg_offset_range,
633 				 sge_pio->ireg_local_offset);
634 		ch_sge_dbg++;
635 	}
636 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
637 	return rc;
638 }
639 
640 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
641 			   struct cudbg_buffer *dbg_buff,
642 			   struct cudbg_error *cudbg_err)
643 {
644 	struct adapter *padap = pdbg_init->adap;
645 	struct cudbg_buffer temp_buff = { 0 };
646 	struct cudbg_ulprx_la *ulprx_la_buff;
647 	int rc;
648 
649 	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
650 			    &temp_buff);
651 	if (rc)
652 		return rc;
653 
654 	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
655 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
656 	ulprx_la_buff->size = ULPRX_LA_SIZE;
657 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
658 	return rc;
659 }
660 
661 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
662 			struct cudbg_buffer *dbg_buff,
663 			struct cudbg_error *cudbg_err)
664 {
665 	struct adapter *padap = pdbg_init->adap;
666 	struct cudbg_buffer temp_buff = { 0 };
667 	struct cudbg_tp_la *tp_la_buff;
668 	int size, rc;
669 
670 	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
671 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
672 	if (rc)
673 		return rc;
674 
675 	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
676 	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
677 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
678 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
679 	return rc;
680 }
681 
/* Collect the CIM PIF logic-analyzer traces.  The data area holds two
 * traces of 6 * CIM_PIFLA_SIZE words each, back to back.
 */
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	/* Second trace starts 6 * CIM_PIFLA_SIZE u32s into the data */
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
705 
/* Collect PCIE indirect registers: the PCIE_PDBG and PCIE_CDBG groups.
 * The buffer is sized as n * 2 ireg_bufs; this assumes both tables have
 * the same number of entries — TODO(review) confirm against
 * cudbg_entity.h.
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		/* Table entry layout: addr, data, local offset, range */
		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
762 
/* Collect PM indirect registers: the PM_RX and PM_TX groups.  The
 * buffer is sized as n * 2 ireg_bufs; this assumes both tables have the
 * same number of entries — TODO(review) confirm against cudbg_entity.h.
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		/* Table entry layout: addr, data, local offset, range */
		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
819 
/* Collect MA indirect registers (T6+ only).  The first table is read
 * range-at-a-time; the second is stepped one register at a time with a
 * 0x20 stride per step.  Buffer is sized as n * 2 ireg_bufs from the
 * first table's count — TODO(review) confirm the second table is not
 * larger.
 */
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	/* MA indirect registers exist only on T6 and later */
	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		/* Table entry layout: addr, data, local offset, range */
		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		/* Entry [3] is a repeat count; step the offset by 0x20
		 * for each single-register read.
		 */
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
874 
/* Collect the ULP-TX logic-analyzer state: per-LA read/write pointers
 * plus CUDBG_NUM_ULPTX_READ data words from each LA.
 */
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	/* Each LA instance's registers are 0x10 apart */
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		/* Re-reading RDDATA repeatedly looks intentional —
		 * presumably each read pops the next LA entry; confirm
		 * against the hardware spec.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
909 
/* Collect uP CIM indirect registers via the T5 or T6 register table.
 * NOTE(review): on a chip that is neither T5 nor T6 the up_cim_reg
 * fields are used uninitialized below — presumably this entity is only
 * requested on T5/T6; confirm against the callers.
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		/* Table entry layout: addr, data, local offset, range */
		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
958 
/* Collect the adapter's mailbox command log.  Entries are copied in
 * ring order starting at the cursor; each command flit is also split
 * into hi/lo 32-bit halves for the dump format.
 */
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		/* Walk the ring from the cursor, wrapping at log->size */
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
1003 
1004 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1005 			       struct cudbg_buffer *dbg_buff,
1006 			       struct cudbg_error *cudbg_err)
1007 {
1008 	struct adapter *padap = pdbg_init->adap;
1009 	struct cudbg_buffer temp_buff = { 0 };
1010 	struct ireg_buf *hma_indr;
1011 	int i, rc, n;
1012 	u32 size;
1013 
1014 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1015 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
1016 
1017 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1018 	size = sizeof(struct ireg_buf) * n;
1019 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1020 	if (rc)
1021 		return rc;
1022 
1023 	hma_indr = (struct ireg_buf *)temp_buff.data;
1024 	for (i = 0; i < n; i++) {
1025 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
1026 		u32 *buff = hma_indr->outbuf;
1027 
1028 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1029 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1030 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1031 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1032 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1033 				 buff, hma_fli->ireg_offset_range,
1034 				 hma_fli->ireg_local_offset);
1035 		hma_indr++;
1036 	}
1037 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1038 	return rc;
1039 }
1040