/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_zlib.h"

/* Adapter memory regions collected for CXGB4_ETH_DUMP_MEM */
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
};

/* Hardware state collected for CXGB4_ETH_DUMP_HW */
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
	{ CUDBG_QDESC, cudbg_collect_qdesc },
	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
	{ CUDBG_CIM_LA, cudbg_collect_cim_la },
	{ CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
	{ CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
	{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
	{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
	{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
	{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
	{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
	{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
	{ CUDBG_RSS, cudbg_collect_rss },
	{ CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
	{ CUDBG_PATH_MTU, cudbg_collect_path_mtu },
	{ CUDBG_PM_STATS, cudbg_collect_pm_stats },
	{ CUDBG_HW_SCHED, cudbg_collect_hw_sched },
	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
	{ CUDBG_TP_LA, cudbg_collect_tp_la },
	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
	{ CUDBG_CLK, cudbg_collect_clk_info },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_TID_INFO, cudbg_collect_tid },
	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
	{ CUDBG_LE_TCAM, cudbg_collect_le_tcam },
	{ CUDBG_CCTRL, cudbg_collect_cctrl },
	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
	{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
	{ CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};
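/* Return the number of bytes needed to collect the given debug entity.
 * Sizes that depend on hardware state, such as which memories are
 * currently enabled, are derived from the adapter's registers.
 */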
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 i, entity;
	u32 len = 0;
	u32 wsize;

	if (flag & CXGB4_ETH_DUMP_HW) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
			entity = cxgb4_collect_hw_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	if (flag & CXGB4_ETH_DUMP_MEM) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
			entity = cxgb4_collect_mem_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	/* If compression is enabled, a smaller destination buffer is enough */
	wsize = cudbg_get_workspace_size();
	if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
		len = CUDBG_DUMP_BUFF_SIZE;

	return len;
}

static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 i, total_size = 0;
	int ret;

	for (i = 0; i < arr_size; i++) {
		const struct cxgb4_collect_entity *e = &e_arr[i];

		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
		entity_hdr->entity_type = e->entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (ret) {
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			ret = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = ret;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		total_size += entity_hdr->size;
	}

	*tot_size += total_size;
}

static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
	u32 workspace_size;

	workspace_size = cudbg_get_workspace_size();
	pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
					   workspace_size);
	if (!pdbg_init->compress_buff)
		return -ENOMEM;

	pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
	pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
			       CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
	return 0;
}

static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
	if (pdbg_init->compress_buff)
		vfree(pdbg_init->compress_buff);
}
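/* Collect the debug dumps selected by @flag into @buf. On success,
 * *buf_size is updated with the number of bytes used: the full buffer
 * size when compression is in effect, the collected total otherwise.
 */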
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_init cudbg_init;
	struct cudbg_hdr *cudbg_hdr;
	int rc;

	size = *buf_size;

	memset(&cudbg_init, 0, sizeof(struct cudbg_init));
	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) *
		   cudbg_hdr->max_entities;
	if (size < min_size)
		return -ENOMEM;

	rc = cudbg_get_workspace_size();
	if (rc) {
		/* Zlib available. So, use zlib deflate */
		cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
		rc = cudbg_alloc_compress_buff(&cudbg_init);
		if (rc) {
			/* Ignore error and continue without compression. */
			dev_warn(adap->pdev_dev,
				 "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
				 rc);
			cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
			rc = 0;
		}
	} else {
		cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
	}

	cudbg_hdr->compress_type = cudbg_init.compress_type;
	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_free_compress_buff(&cudbg_init);
	cudbg_hdr->data_len = total_size;
	if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
		*buf_size = size;
	else
		*buf_size = total_size;
	return 0;
}

void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
	adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
	adapter->eth_dump.version = adapter->params.fw_vers;
	adapter->eth_dump.len = 0;
}

static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
	struct adapter *adap = container_of(data, struct adapter, vmcoredd);
	u32 len = data->size;

	return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
}

int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
{
	struct vmcoredd_data *data = &adap->vmcoredd;
	u32 len;

	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += CUDBG_DUMP_BUFF_SIZE;

	data->size = len;
	snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
		 cxgb4_driver_name, adap->name);
	data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;

	return vmcore_add_device_dump(data);
}