/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

/* Commit the data staged in @pin_buff into the output debug buffer
 * @dbg_buff and then release the scratch buffer back to it.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}

/* Return 1 if it is safe to talk to firmware: the adapter has FW_OK set
 * and is not running in backdoor-access mode (use_bd).
 */
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	/* Pad only when the entity's payload is not already a multiple of
	 * 4 bytes; the pad bytes written are zeros and are recorded in
	 * entity_hdr->num_pad so a reader can strip them.
	 */
	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	/* Final entity size includes any padding just appended. */
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

/* Return a pointer to the i-th entity header inside @outbuf.  Entity
 * headers are laid out as an array immediately after the cudbg header;
 * note @i is 1-based (hence the "i - 1").
 */
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

/* Dump the adapter register map (t4_get_regs()) into the debug buffer.
 * The regmap size is chip-generation dependent.
 */
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	/* NOTE(review): buf_size stays 0 if the chip is none of T4/T5/T6;
	 * presumably unreachable for supported adapters — confirm.
	 */
	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the firmware device log from adapter memory.  The devlog
 * location/size comes from t4_init_devlog_params(); the actual read goes
 * through memory window 0, hence the win0_lock.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the CIM logic analyzer contents.  The dump is prefixed with the
 * UP_UP_DBG_LA_CFG_A config word so a decoder knows how the LA was set up.
 * T6 LA entries are 11 words per 10 captures; earlier chips use 8 words.
 */
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	/* Config word first, LA data right after it. */
	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the CIM MA logic analyzer: two halves written back-to-back
 * (second half starts 5 * CIM_MALA_SIZE words in).
 */
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect CIM queue configuration: IBQ/OBQ pointer state via t4_cim_read()
 * plus base/size/threshold info via t4_read_cimq_cfg().
 */
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Common helper for all CIM inbound-queue collectors: read IBQ @qid into
 * a scratch buffer and commit it.  A zero word count from t4_read_cim_ibq()
 * is mapped to CUDBG_SYSTEM_ERROR; a negative value is passed through.
 */
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Per-queue IBQ collectors; the qid constants (0-5) select TP0, TP1, ULP,
 * SGE0, SGE1 and NC-SI respectively, matching the function names.
 */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

/* Query the size (in bytes) of CIM outbound queue @qid.  Select the queue
 * via CIM_QUEUE_CONFIG_REF_A, then read its size from
 * CIM_QUEUE_CONFIG_CTRL_A — the write must precede the read.
 */
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

/* Common helper for all CIM outbound-queue collectors; mirrors
 * cudbg_read_cim_ibq() but sizes the buffer from the hardware via
 * cudbg_cim_obq_size().
 */
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Per-queue OBQ collectors; qids 0-7 select ULP0-3, SGE, NC-SI and the
 * two SGE RX queues, matching the function names.
 */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

/* Read @tot_len bytes of adapter memory of @mem_type in CUDBG_CHUNK_SIZE
 * pieces through memory window 0, committing each chunk to the debug
 * buffer as it is read (keeps the scratch allocation bounded).
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}

/* Fill @mem_info with EDC0/EDC1 sizes (from the MA BAR registers) and
 * presence flags (from MA_TARGET_MEM_ENABLE_A).
 */
static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
				   struct card_mem *mem_info)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;

	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
	value = EDRAM0_SIZE_G(value);
	mem_info->size_edc0 = (u16)value;

	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
	value = EDRAM1_SIZE_G(value);
	mem_info->size_edc1 = (u16)value;

	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (value & EDRAM0_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC0_FLAG);
	if (value & EDRAM1_ENABLE_F)
		mem_info->mem_flag |= (1 << EDC1_FLAG);
}

/* Ask firmware to flush the uP dcache so subsequent edcX/mcX reads see
 * current data.  A failure is recorded as a warning, not an error — the
 * dump proceeds regardless.
 */
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

/* Dump one memory region (EDC0 or EDC1).  Returns
 * CUDBG_STATUS_ENTITY_NOT_FOUND when the region is absent/disabled or
 * @mem_type is not a supported region.
 */
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	struct card_mem mem_info = {0};
	unsigned long flag, size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	cudbg_collect_mem_info(pdbg_init, &mem_info);
	switch (mem_type) {
	case MEM_EDC0:
		flag = (1 << EDC0_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
		break;
	case MEM_EDC1:
		flag = (1 << EDC1_FLAG);
		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
		break;
	default:
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	if (mem_info.mem_flag & flag) {
		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
				       size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}
err:
	return rc;
}

/* Entity collector: dump all of EDC0. */
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

/* Entity collector: dump all of EDC1. */
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

/* Collect TP indirect registers in three groups — TP_PIO, TP_TM_PIO and
 * TP_MIB_INDEX — using the T5 or T6 register tables as appropriate.  Each
 * table row is {addr, data, local_offset, offset_range}; the output is a
 * sequence of struct ireg_buf entries.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total entry count across all three groups for this chip. */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the two SGE debug indirect-register sets described by
 * t5_sge_dbg_index_array.
 */
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the ULP-RX logic analyzer via t4_ulprx_read_la(). */
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the TP logic analyzer plus its current mode
 * (DBGLAMODE field of TP_DBG_LA_CONFIG_A).
 */
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the CIM PIF logic analyzer: two halves (each
 * 6 * CIM_PIFLA_SIZE words) stored back-to-back in ->data.
 */
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect PCIe indirect registers: the PDBG group followed by the CDBG
 * group (the "* 2" in the buffer sizing assumes both tables have the same
 * entry count).
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect PM indirect registers: PM_RX group then PM_TX group (buffer
 * sized as "* 2" on the PM_RX count, assuming equal table sizes).
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect MA indirect registers (T6 and later only).  The second table
 * (t6_ma_ireg_array2) is read one word at a time, stepping the local
 * offset by 0x20 per word.
 */
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		/* Row's 4th element is a repeat count, not a range here. */
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect ULP-TX logic analyzer state: per-channel read/write pointers
 * and a burst of data words per channel.
 */
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		/* NOTE(review): the same RDDATA register is read
		 * CUDBG_NUM_ULPTX_READ times — presumably reading it
		 * advances the LA read pointer in hardware; confirm.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect uP CIM indirect registers using the T5 or T6 register table.
 * Unlike the other indirect collectors, each t4_cim_read() can fail, so
 * the scratch buffer is dropped on the first error.
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect the driver's firmware mailbox command log.  Entries are walked
 * from the log cursor (oldest first, wrapping at log->size); unused slots
 * (timestamp == 0) are skipped.  Each 64-bit command flit is split into
 * hi/lo 32-bit halves for the dump format.
 */
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}

/* Collect HMA indirect registers (T6 and later only) from
 * t6_hma_ireg_array.
 */
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}