// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 */

#include <linux/sort.h>
#include <linux/string.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
	{0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
	{0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
	{0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
};

static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
	{0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
	{0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
	{0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
	{0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
};

static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 21},
	{0x7e50, 0x7e54, 0x30, 32},
	{0x7e50, 0x7e54, 0x50, 22},
	{0x7e50, 0x7e54, 0x68, 12}
};

static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 8},
	{0x7e50, 0x7e54, 0x20, 13},
	{0x7e50, 0x7e54, 0x30, 16},
	{0x7e50, 0x7e54, 0x40, 16},
	{0x7e50, 0x7e54, 0x50, 16},
	{0x7e50, 0x7e54, 0x60, 6},
	{0x7e50, 0x7e54, 0x68, 4}
};

static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
	{0x10cc, 0x10d0, 0x0, 16},
	{0x10cc, 0x10d4, 0x0, 16},
};

static const u32 t6_sge_qbase_index_array[] = {
	/* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
	0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
};

static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
	{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
	{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
	{0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
};

static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
	{0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
	{0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
};

static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
	{0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
	{0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
};

static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
	{0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};

static const u32 t5_pcie_config_array[][2] = {
	{0x0, 0x34},
	{0x3c, 0x40},
	{0x50, 0x64},
	{0x70, 0x80},
	{0x94, 0xa0},
	{0xb0, 0xb8},
	{0xd0, 0xd4},
	{0x100, 0x128},
	{0x140, 0x148},
	{0x150, 0x164},
	{0x170, 0x178},
	{0x180, 0x194},
	{0x1a0, 0x1b8},
	{0x1c0, 0x208},
};

static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
	{0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
};

static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};

static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
};

static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
};

static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
	{0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
};

u32 cudbg_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_left = pin_buff->size;
	bytes_read = 0;
	while (bytes_left > 0) {
		/* Do compression in smaller chunks */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;
		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}

static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}

static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

static int cudbg_meminfo_get_mem_index(struct adapter *padap,
				       struct cudbg_meminfo *mem_info,
				       u8 mem_type, u8 *idx)
{
	u8 i, flag;

	switch (mem_type) {
	case MEM_EDC0:
		flag = EDC0_FLAG;
		break;
	case MEM_EDC1:
		flag = EDC1_FLAG;
		break;
	case MEM_MC0:
		/* Some T5 cards have both MC0 and MC1. */
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
		break;
	case MEM_MC1:
		flag = MC1_FLAG;
		break;
	case MEM_HMA:
		flag = HMA_FLAG;
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	for (i = 0; i < mem_info->avail_c; i++) {
		if (mem_info->avail[i].idx == flag) {
			*idx = i;
			return 0;
		}
	}

	return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

/* Fetch the @region_name's start and end from @meminfo. */
static int cudbg_get_mem_region(struct adapter *padap,
				struct cudbg_meminfo *meminfo,
				u8 mem_type, const char *region_name,
				struct cudbg_mem_desc *mem_desc)
{
	u8 mc, found = 0;
	u32 idx = 0;
	int rc, i;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
	if (rc)
		return rc;

	i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
	if (i < 0)
		return -EINVAL;

	idx = i;
	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* Skip holes */

		if (!(meminfo->mem[i].limit))
			meminfo->mem[i].limit =
				i < meminfo->mem_c - 1 ?
				meminfo->mem[i + 1].base - 1 : ~0;

		if (meminfo->mem[i].idx == idx) {
			/* Check if the region exists in @mem_type memory */
			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
			    meminfo->mem[i].limit < meminfo->avail[mc].base)
				return -EINVAL;

			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
				return -EINVAL;

			memcpy(mem_desc, &meminfo->mem[i],
			       sizeof(struct cudbg_mem_desc));
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	return 0;
}

/* Fetch and update the start and end of the requested memory region w.r.t 0
 * in the corresponding EDC/MC/HMA.
 */
static int cudbg_get_mem_relative(struct adapter *padap,
				  struct cudbg_meminfo *meminfo,
				  u8 mem_type, u32 *out_base, u32 *out_end)
{
	u8 mc_idx;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
	if (rc)
		return rc;

	if (*out_base < meminfo->avail[mc_idx].base)
		*out_base = 0;
	else
		*out_base -= meminfo->avail[mc_idx].base;

	if (*out_end > meminfo->avail[mc_idx].limit)
		*out_end = meminfo->avail[mc_idx].limit;
	else
		*out_end -= meminfo->avail[mc_idx].base;

	return 0;
}

/* Get TX and RX Payload region */
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
				   const char *region_name,
				   struct cudbg_region_info *payload)
{
	struct cudbg_mem_desc mem_desc = { 0 };
	struct cudbg_meminfo meminfo;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
	if (rc) {
		payload->exist = false;
		return 0;
	}

	payload->exist = true;
	payload->start = mem_desc.base;
	payload->end = mem_desc.limit;

	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
}

static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
			     int mtype, u32 addr, u32 len, void *hbuf)
{
	u32 win_pf, memoffset, mem_aperture, mem_base;
	struct adapter *adap = pdbg_init->adap;
	u32 pos, offset, resid;
	u32 *res_buf;
	u64 *buf;
	int ret;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;

	buf = (u64 *)hbuf;

	/* Try to do 64-bit reads. Residual will be handled later. */
	resid = len & 0x7;
	len -= resid;

	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	addr = addr + memoffset;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data from the adapter */
	while (len > 0) {
		*buf++ = le64_to_cpu((__force __le64)
				     t4_read_reg64(adap, mem_base + offset));
		offset += sizeof(u64);
		len -= sizeof(u64);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	res_buf = (u32 *)buf;
	/* Read residual in 32-bit multiples */
	while (resid > sizeof(u32)) {
		*res_buf++ = le32_to_cpu((__force __le32)
					 t4_read_reg(adap, mem_base + offset));
		offset += sizeof(u32);
		resid -= sizeof(u32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* Transfer residual < 32-bits */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);

	return 0;
}

#define CUDBG_YIELD_ITERATION 256

static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_region_info payload[2];
	u32 yield_count = 0;
	int rc = 0;
	u8 i;

	/* Get TX/RX Payload region range if they exist */
	memset(payload, 0, sizeof(payload));
	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
		if (rc)
			return rc;

		if (payload[i].exist) {
			/* Align start and end to avoid wrap around */
			payload[i].start = roundup(payload[i].start,
						   CUDBG_CHUNK_SIZE);
			payload[i].end = rounddown(payload[i].end,
						   CUDBG_CHUNK_SIZE);
		}
	}

	bytes_left = tot_len;
	while (bytes_left > 0) {
		/* As MC size is huge and read through PIO access, this
		 * loop will hold the cpu for a long time. The OS may think
		 * that the process has hung and generate CPU stall traces.
		 * So yield the cpu regularly.
		 */
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;

		for (i = 0; i < ARRAY_SIZE(payload); i++)
			if (payload[i].exist &&
			    bytes_read >= payload[i].start &&
			    bytes_read + bytes <= payload[i].end)
				/* TX and RX Payload regions can't overlap */
				goto skip_read;

		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}

skip_read:
		bytes_left -= bytes;
		bytes_read += bytes;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
				 struct cudbg_error *cudbg_err,
				 u8 mem_type, unsigned long *region_size)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_meminfo mem_info;
	u8 mc_idx;
	int rc;

	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(padap, &mem_info);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	if (region_size)
		*region_size = mem_info.avail[mc_idx].limit -
			       mem_info.avail[mc_idx].base;

	return 0;
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	unsigned long size = 0;
	int rc;

	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
	if (rc)
		return rc;

	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc, nentries;

	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);

	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
					      struct sge_qbase_reg_field *qbase,
					      u32 func, bool is_pf)
{
	u32 *buff, i;

	if (is_pf) {
		buff = qbase->pf_data_value[func];
	} else {
		buff = qbase->vf_data_value[func];
		/* In SGE_QBASE_INDEX,
		 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
		 */
		func += 8;
	}

	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct sge_qbase_reg_field *sge_qbase;
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
			    &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}

	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
		 * SGE_QBASE_MAP[0-3]
		 */
		sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
		for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
			sge_qbase->reg_data[i] =
				t6_sge_qbase_index_array[i + 1];

		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, true);

		for (i = 0; i < padap->params.arch.vfcount; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, false);

		sge_qbase->vfcount = padap->params.arch.vfcount;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
pdbg_init->adap; 1852 struct cudbg_buffer temp_buff = { 0 }; 1853 struct cudbg_ulprx_la *ulprx_la_buff; 1854 int rc; 1855 1856 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la), 1857 &temp_buff); 1858 if (rc) 1859 return rc; 1860 1861 ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data; 1862 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data); 1863 ulprx_la_buff->size = ULPRX_LA_SIZE; 1864 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1865 } 1866 1867 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, 1868 struct cudbg_buffer *dbg_buff, 1869 struct cudbg_error *cudbg_err) 1870 { 1871 struct adapter *padap = pdbg_init->adap; 1872 struct cudbg_buffer temp_buff = { 0 }; 1873 struct cudbg_tp_la *tp_la_buff; 1874 int size, rc; 1875 1876 size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); 1877 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 1878 if (rc) 1879 return rc; 1880 1881 tp_la_buff = (struct cudbg_tp_la *)temp_buff.data; 1882 tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A)); 1883 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL); 1884 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1885 } 1886 1887 int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, 1888 struct cudbg_buffer *dbg_buff, 1889 struct cudbg_error *cudbg_err) 1890 { 1891 struct adapter *padap = pdbg_init->adap; 1892 struct cudbg_buffer temp_buff = { 0 }; 1893 struct cudbg_meminfo *meminfo_buff; 1894 struct cudbg_ver_hdr *ver_hdr; 1895 int rc; 1896 1897 rc = cudbg_get_buff(pdbg_init, dbg_buff, 1898 sizeof(struct cudbg_ver_hdr) + 1899 sizeof(struct cudbg_meminfo), 1900 &temp_buff); 1901 if (rc) 1902 return rc; 1903 1904 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; 1905 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 1906 ver_hdr->revision = CUDBG_MEMINFO_REV; 1907 ver_hdr->size = sizeof(struct cudbg_meminfo); 1908 1909 meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data + 1910 sizeof(*ver_hdr)); 1911 rc = cudbg_fill_meminfo(padap, meminfo_buff); 1912 if (rc) { 1913 cudbg_err->sys_err = rc; 1914 cudbg_put_buff(pdbg_init, &temp_buff); 1915 return rc; 1916 } 1917 1918 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1919 } 1920 1921 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, 1922 struct cudbg_buffer *dbg_buff, 1923 struct cudbg_error *cudbg_err) 1924 { 1925 struct cudbg_cim_pif_la *cim_pif_la_buff; 1926 struct adapter *padap = pdbg_init->adap; 1927 struct cudbg_buffer temp_buff = { 0 }; 1928 int size, rc; 1929 1930 size = sizeof(struct cudbg_cim_pif_la) + 1931 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); 1932 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 1933 if (rc) 1934 return rc; 1935 1936 cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data; 1937 cim_pif_la_buff->size = CIM_PIFLA_SIZE; 1938 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data, 1939 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE, 1940 NULL, NULL); 1941 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1942 } 1943 1944 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, 1945 struct cudbg_buffer *dbg_buff, 1946 struct cudbg_error *cudbg_err) 1947 { 1948 struct adapter *padap = pdbg_init->adap; 1949 struct cudbg_buffer temp_buff = { 0 }; 1950 struct cudbg_clk_info *clk_info_buff; 1951 u64 tp_tick_us; 1952 int rc; 1953 1954 if (!padap->params.vpd.cclk) 1955 return CUDBG_STATUS_CCLK_NOT_DEFINED; 1956 1957 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct 
cudbg_clk_info), 1958 &temp_buff); 1959 if (rc) 1960 return rc; 1961 1962 clk_info_buff = (struct cudbg_clk_info *)temp_buff.data; 1963 clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */ 1964 clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A); 1965 clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res); 1966 clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res); 1967 tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000; 1968 1969 clk_info_buff->dack_timer = 1970 (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 * 1971 t4_read_reg(padap, TP_DACK_TIMER_A); 1972 clk_info_buff->retransmit_min = 1973 tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A); 1974 clk_info_buff->retransmit_max = 1975 tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A); 1976 clk_info_buff->persist_timer_min = 1977 tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A); 1978 clk_info_buff->persist_timer_max = 1979 tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A); 1980 clk_info_buff->keepalive_idle_timer = 1981 tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A); 1982 clk_info_buff->keepalive_interval = 1983 tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A); 1984 clk_info_buff->initial_srtt = 1985 tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A)); 1986 clk_info_buff->finwait2_timer = 1987 tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A); 1988 1989 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1990 } 1991 1992 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, 1993 struct cudbg_buffer *dbg_buff, 1994 struct cudbg_error *cudbg_err) 1995 { 1996 struct adapter *padap = pdbg_init->adap; 1997 struct cudbg_buffer temp_buff = { 0 }; 1998 struct ireg_buf *ch_pcie; 1999 int i, rc, n; 2000 u32 size; 2001 2002 n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2003 size = sizeof(struct ireg_buf) * n * 2; 2004 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2005 if (rc) 2006 return rc; 2007 2008 ch_pcie = (struct ireg_buf *)temp_buff.data; 2009 /* PCIE_PDBG */ 2010 for (i = 0; i < n; i++) { 2011 struct ireg_field *pcie_pio = &ch_pcie->tp_pio; 2012 u32 *buff = ch_pcie->outbuf; 2013 2014 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0]; 2015 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1]; 2016 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2]; 2017 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3]; 2018 t4_read_indirect(padap, 2019 pcie_pio->ireg_addr, 2020 pcie_pio->ireg_data, 2021 buff, 2022 pcie_pio->ireg_offset_range, 2023 pcie_pio->ireg_local_offset); 2024 ch_pcie++; 2025 } 2026 2027 /* PCIE_CDBG */ 2028 n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2029 for (i = 0; i < n; i++) { 2030 struct ireg_field *pcie_pio = &ch_pcie->tp_pio; 2031 u32 *buff = ch_pcie->outbuf; 2032 2033 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0]; 2034 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1]; 2035 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2]; 2036 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3]; 2037 t4_read_indirect(padap, 2038 pcie_pio->ireg_addr, 2039 pcie_pio->ireg_data, 2040 buff, 2041 pcie_pio->ireg_offset_range, 2042 pcie_pio->ireg_local_offset); 2043 ch_pcie++; 2044 } 2045 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2046 } 2047 2048 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, 2049 struct cudbg_buffer *dbg_buff, 2050 struct cudbg_error *cudbg_err) 2051 { 2052 struct adapter *padap = pdbg_init->adap; 2053 struct cudbg_buffer temp_buff 
= { 0 }; 2054 struct ireg_buf *ch_pm; 2055 int i, rc, n; 2056 u32 size; 2057 2058 n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); 2059 size = sizeof(struct ireg_buf) * n * 2; 2060 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2061 if (rc) 2062 return rc; 2063 2064 ch_pm = (struct ireg_buf *)temp_buff.data; 2065 /* PM_RX */ 2066 for (i = 0; i < n; i++) { 2067 struct ireg_field *pm_pio = &ch_pm->tp_pio; 2068 u32 *buff = ch_pm->outbuf; 2069 2070 pm_pio->ireg_addr = t5_pm_rx_array[i][0]; 2071 pm_pio->ireg_data = t5_pm_rx_array[i][1]; 2072 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2]; 2073 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3]; 2074 t4_read_indirect(padap, 2075 pm_pio->ireg_addr, 2076 pm_pio->ireg_data, 2077 buff, 2078 pm_pio->ireg_offset_range, 2079 pm_pio->ireg_local_offset); 2080 ch_pm++; 2081 } 2082 2083 /* PM_TX */ 2084 n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32)); 2085 for (i = 0; i < n; i++) { 2086 struct ireg_field *pm_pio = &ch_pm->tp_pio; 2087 u32 *buff = ch_pm->outbuf; 2088 2089 pm_pio->ireg_addr = t5_pm_tx_array[i][0]; 2090 pm_pio->ireg_data = t5_pm_tx_array[i][1]; 2091 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2]; 2092 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3]; 2093 t4_read_indirect(padap, 2094 pm_pio->ireg_addr, 2095 pm_pio->ireg_data, 2096 buff, 2097 pm_pio->ireg_offset_range, 2098 pm_pio->ireg_local_offset); 2099 ch_pm++; 2100 } 2101 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2102 } 2103 2104 int cudbg_collect_tid(struct cudbg_init *pdbg_init, 2105 struct cudbg_buffer *dbg_buff, 2106 struct cudbg_error *cudbg_err) 2107 { 2108 struct adapter *padap = pdbg_init->adap; 2109 struct cudbg_tid_info_region_rev1 *tid1; 2110 struct cudbg_buffer temp_buff = { 0 }; 2111 struct cudbg_tid_info_region *tid; 2112 u32 para[2], val[2]; 2113 int rc; 2114 2115 rc = cudbg_get_buff(pdbg_init, dbg_buff, 2116 sizeof(struct cudbg_tid_info_region_rev1), 2117 &temp_buff); 2118 if (rc) 2119 return rc; 2120 2121 tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data; 2122 tid = &tid1->tid; 2123 tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; 2124 tid1->ver_hdr.revision = CUDBG_TID_INFO_REV; 2125 tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) - 2126 sizeof(struct cudbg_ver_hdr); 2127 2128 /* If firmware is not attached/alive, use backdoor register 2129 * access to collect dump. 
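 * "Backdoor" collection here simply skips the FW_PARAMS mailbox
 * queries below and fills the region at the fill_tid label from the
 * driver's cached padap->tids state and direct LE_DB_* register reads.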
2130 */ 2131 if (!is_fw_attached(pdbg_init)) 2132 goto fill_tid; 2133 2134 #define FW_PARAM_PFVF_A(param) \ 2135 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ 2136 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ 2137 FW_PARAMS_PARAM_Y_V(0) | \ 2138 FW_PARAMS_PARAM_Z_V(0)) 2139 2140 para[0] = FW_PARAM_PFVF_A(ETHOFLD_START); 2141 para[1] = FW_PARAM_PFVF_A(ETHOFLD_END); 2142 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val); 2143 if (rc < 0) { 2144 cudbg_err->sys_err = rc; 2145 cudbg_put_buff(pdbg_init, &temp_buff); 2146 return rc; 2147 } 2148 tid->uotid_base = val[0]; 2149 tid->nuotids = val[1] - val[0] + 1; 2150 2151 if (is_t5(padap->params.chip)) { 2152 tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4; 2153 } else if (is_t6(padap->params.chip)) { 2154 tid1->tid_start = 2155 t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A); 2156 tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A); 2157 2158 para[0] = FW_PARAM_PFVF_A(HPFILTER_START); 2159 para[1] = FW_PARAM_PFVF_A(HPFILTER_END); 2160 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, 2161 para, val); 2162 if (rc < 0) { 2163 cudbg_err->sys_err = rc; 2164 cudbg_put_buff(pdbg_init, &temp_buff); 2165 return rc; 2166 } 2167 tid->hpftid_base = val[0]; 2168 tid->nhpftids = val[1] - val[0] + 1; 2169 } 2170 2171 #undef FW_PARAM_PFVF_A 2172 2173 fill_tid: 2174 tid->ntids = padap->tids.ntids; 2175 tid->nstids = padap->tids.nstids; 2176 tid->stid_base = padap->tids.stid_base; 2177 tid->hash_base = padap->tids.hash_base; 2178 2179 tid->natids = padap->tids.natids; 2180 tid->nftids = padap->tids.nftids; 2181 tid->ftid_base = padap->tids.ftid_base; 2182 tid->aftid_base = padap->tids.aftid_base; 2183 tid->aftid_end = padap->tids.aftid_end; 2184 2185 tid->sftid_base = padap->tids.sftid_base; 2186 tid->nsftids = padap->tids.nsftids; 2187 2188 tid->flags = padap->flags; 2189 tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A); 2190 tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A); 2191 tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A); 2192 2193 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2194 } 2195 2196 int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, 2197 struct cudbg_buffer *dbg_buff, 2198 struct cudbg_error *cudbg_err) 2199 { 2200 struct adapter *padap = pdbg_init->adap; 2201 struct cudbg_buffer temp_buff = { 0 }; 2202 u32 size, *value, j; 2203 int i, rc, n; 2204 2205 size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; 2206 n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); 2207 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2208 if (rc) 2209 return rc; 2210 2211 value = (u32 *)temp_buff.data; 2212 for (i = 0; i < n; i++) { 2213 for (j = t5_pcie_config_array[i][0]; 2214 j <= t5_pcie_config_array[i][1]; j += 4) { 2215 t4_hw_pci_read_cfg4(padap, j, value); 2216 value++; 2217 } 2218 } 2219 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2220 } 2221 2222 static int cudbg_sge_ctxt_check_valid(u32 *buf, int type) 2223 { 2224 int index, bit, bit_pos = 0; 2225 2226 switch (type) { 2227 case CTXT_EGRESS: 2228 bit_pos = 176; 2229 break; 2230 case CTXT_INGRESS: 2231 bit_pos = 141; 2232 break; 2233 case CTXT_FLM: 2234 bit_pos = 89; 2235 break; 2236 } 2237 index = bit_pos / 32; 2238 bit = bit_pos % 32; 2239 return buf[index] & (1U << bit); 2240 } 2241 2242 static int cudbg_get_ctxt_region_info(struct adapter *padap, 2243 struct cudbg_region_info *ctx_info, 2244 u8 *mem_type) 2245 { 2246 struct cudbg_mem_desc mem_desc; 2247 struct 
cudbg_meminfo meminfo; 2248 u32 i, j, value, found; 2249 u8 flq; 2250 int rc; 2251 2252 rc = cudbg_fill_meminfo(padap, &meminfo); 2253 if (rc) 2254 return rc; 2255 2256 /* Get EGRESS and INGRESS context region size */ 2257 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { 2258 found = 0; 2259 memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc)); 2260 for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) { 2261 rc = cudbg_get_mem_region(padap, &meminfo, j, 2262 cudbg_region[i], 2263 &mem_desc); 2264 if (!rc) { 2265 found = 1; 2266 rc = cudbg_get_mem_relative(padap, &meminfo, j, 2267 &mem_desc.base, 2268 &mem_desc.limit); 2269 if (rc) { 2270 ctx_info[i].exist = false; 2271 break; 2272 } 2273 ctx_info[i].exist = true; 2274 ctx_info[i].start = mem_desc.base; 2275 ctx_info[i].end = mem_desc.limit; 2276 mem_type[i] = j; 2277 break; 2278 } 2279 } 2280 if (!found) 2281 ctx_info[i].exist = false; 2282 } 2283 2284 /* Get FLM and CNM max qid. */ 2285 value = t4_read_reg(padap, SGE_FLM_CFG_A); 2286 2287 /* Get number of data freelist queues */ 2288 flq = HDRSTARTFLQ_G(value); 2289 ctx_info[CTXT_FLM].exist = true; 2290 ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE; 2291 2292 /* The number of CONM contexts are same as number of freelist 2293 * queues. 2294 */ 2295 ctx_info[CTXT_CNM].exist = true; 2296 ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end; 2297 2298 return 0; 2299 } 2300 2301 int cudbg_dump_context_size(struct adapter *padap) 2302 { 2303 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; 2304 u8 mem_type[CTXT_INGRESS + 1] = { 0 }; 2305 u32 i, size = 0; 2306 int rc; 2307 2308 /* Get max valid qid for each type of queue */ 2309 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); 2310 if (rc) 2311 return rc; 2312 2313 for (i = 0; i < CTXT_CNM; i++) { 2314 if (!region_info[i].exist) { 2315 if (i == CTXT_EGRESS || i == CTXT_INGRESS) 2316 size += CUDBG_LOWMEM_MAX_CTXT_QIDS * 2317 SGE_CTXT_SIZE; 2318 continue; 2319 } 2320 2321 size += (region_info[i].end - region_info[i].start + 1) / 2322 SGE_CTXT_SIZE; 2323 } 2324 return size * sizeof(struct cudbg_ch_cntxt); 2325 } 2326 2327 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, 2328 enum ctxt_type ctype, u32 *data) 2329 { 2330 struct adapter *padap = pdbg_init->adap; 2331 int rc = -1; 2332 2333 /* Under heavy traffic, the SGE Queue contexts registers will be 2334 * frequently accessed by firmware. 2335 * 2336 * To avoid conflicts with firmware, always ask firmware to fetch 2337 * the SGE Queue contexts via mailbox. On failure, fallback to 2338 * accessing hardware registers directly. 
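 * rc is pre-initialised to -1 above, so when no firmware is attached
 * the mailbox path is skipped and the backdoor read via
 * t4_sge_ctxt_rd_bd() below runs unconditionally.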
2339 */ 2340 if (is_fw_attached(pdbg_init)) 2341 rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data); 2342 if (rc) 2343 t4_sge_ctxt_rd_bd(padap, cid, ctype, data); 2344 } 2345 2346 static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid, 2347 u8 ctxt_type, 2348 struct cudbg_ch_cntxt **out_buff) 2349 { 2350 struct cudbg_ch_cntxt *buff = *out_buff; 2351 int rc; 2352 u32 j; 2353 2354 for (j = 0; j < max_qid; j++) { 2355 cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data); 2356 rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type); 2357 if (!rc) 2358 continue; 2359 2360 buff->cntxt_type = ctxt_type; 2361 buff->cntxt_id = j; 2362 buff++; 2363 if (ctxt_type == CTXT_FLM) { 2364 cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data); 2365 buff->cntxt_type = CTXT_CNM; 2366 buff->cntxt_id = j; 2367 buff++; 2368 } 2369 } 2370 2371 *out_buff = buff; 2372 } 2373 2374 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, 2375 struct cudbg_buffer *dbg_buff, 2376 struct cudbg_error *cudbg_err) 2377 { 2378 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; 2379 struct adapter *padap = pdbg_init->adap; 2380 u32 j, size, max_ctx_size, max_ctx_qid; 2381 u8 mem_type[CTXT_INGRESS + 1] = { 0 }; 2382 struct cudbg_buffer temp_buff = { 0 }; 2383 struct cudbg_ch_cntxt *buff; 2384 u8 *ctx_buf; 2385 u8 i, k; 2386 int rc; 2387 2388 /* Get max valid qid for each type of queue */ 2389 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); 2390 if (rc) 2391 return rc; 2392 2393 rc = cudbg_dump_context_size(padap); 2394 if (rc <= 0) 2395 return CUDBG_STATUS_ENTITY_NOT_FOUND; 2396 2397 size = rc; 2398 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2399 if (rc) 2400 return rc; 2401 2402 /* Get buffer with enough space to read the biggest context 2403 * region in memory. 2404 */ 2405 max_ctx_size = max(region_info[CTXT_EGRESS].end - 2406 region_info[CTXT_EGRESS].start + 1, 2407 region_info[CTXT_INGRESS].end - 2408 region_info[CTXT_INGRESS].start + 1); 2409 2410 ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL); 2411 if (!ctx_buf) { 2412 cudbg_put_buff(pdbg_init, &temp_buff); 2413 return -ENOMEM; 2414 } 2415 2416 buff = (struct cudbg_ch_cntxt *)temp_buff.data; 2417 2418 /* Collect EGRESS and INGRESS context data. 2419 * In case of failures, fallback to collecting via FW or 2420 * backdoor access. 2421 */ 2422 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { 2423 if (!region_info[i].exist) { 2424 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; 2425 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, 2426 &buff); 2427 continue; 2428 } 2429 2430 max_ctx_size = region_info[i].end - region_info[i].start + 1; 2431 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; 2432 2433 /* If firmware is not attached/alive, use backdoor register 2434 * access to collect dump. 2435 */ 2436 if (is_fw_attached(pdbg_init)) { 2437 t4_sge_ctxt_flush(padap, padap->mbox, i); 2438 2439 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i], 2440 region_info[i].start, max_ctx_size, 2441 (__be32 *)ctx_buf, 1); 2442 } 2443 2444 if (rc || !is_fw_attached(pdbg_init)) { 2445 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; 2446 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, 2447 &buff); 2448 continue; 2449 } 2450 2451 for (j = 0; j < max_ctx_qid; j++) { 2452 __be64 *dst_off; 2453 u64 *src_off; 2454 2455 src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); 2456 dst_off = (__be64 *)buff->data; 2457 2458 /* The data is stored in 64-bit cpu order. Convert it 2459 * to big endian before parsing. 
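 * Each SGE_CTXT_SIZE-byte context is converted one u64 word at a time
 * (SGE_CTXT_SIZE / sizeof(u64) words) so the dump carries the same
 * byte order regardless of host endianness.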
2460 */ 2461 for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++) 2462 dst_off[k] = cpu_to_be64(src_off[k]); 2463 2464 rc = cudbg_sge_ctxt_check_valid(buff->data, i); 2465 if (!rc) 2466 continue; 2467 2468 buff->cntxt_type = i; 2469 buff->cntxt_id = j; 2470 buff++; 2471 } 2472 } 2473 2474 kvfree(ctx_buf); 2475 2476 /* Collect FREELIST and CONGESTION MANAGER contexts */ 2477 max_ctx_size = region_info[CTXT_FLM].end - 2478 region_info[CTXT_FLM].start + 1; 2479 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; 2480 /* Since FLM and CONM are 1-to-1 mapped, the below function 2481 * will fetch both FLM and CONM contexts. 2482 */ 2483 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff); 2484 2485 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2486 } 2487 2488 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) 2489 { 2490 *mask = x | y; 2491 y = (__force u64)cpu_to_be64(y); 2492 memcpy(addr, (char *)&y + 2, ETH_ALEN); 2493 } 2494 2495 static void cudbg_mps_rpl_backdoor(struct adapter *padap, 2496 struct fw_ldst_mps_rplc *mps_rplc) 2497 { 2498 if (is_t5(padap->params.chip)) { 2499 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, 2500 MPS_VF_RPLCT_MAP3_A)); 2501 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, 2502 MPS_VF_RPLCT_MAP2_A)); 2503 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, 2504 MPS_VF_RPLCT_MAP1_A)); 2505 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, 2506 MPS_VF_RPLCT_MAP0_A)); 2507 } else { 2508 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, 2509 MPS_VF_RPLCT_MAP7_A)); 2510 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, 2511 MPS_VF_RPLCT_MAP6_A)); 2512 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, 2513 MPS_VF_RPLCT_MAP5_A)); 2514 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, 2515 MPS_VF_RPLCT_MAP4_A)); 2516 } 2517 mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A)); 2518 mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A)); 2519 mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A)); 2520 mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A)); 2521 } 2522 2523 static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init, 2524 struct cudbg_mps_tcam *tcam, u32 idx) 2525 { 2526 struct adapter *padap = pdbg_init->adap; 2527 u64 tcamy, tcamx, val; 2528 u32 ctl, data2; 2529 int rc = 0; 2530 2531 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) { 2532 /* CtlReqID - 1: use Host Driver Requester ID 2533 * CtlCmdType - 0: Read, 1: Write 2534 * CtlTcamSel - 0: TCAM0, 1: TCAM1 2535 * CtlXYBitSel- 0: Y bit, 1: X bit 2536 */ 2537 2538 /* Read tcamy */ 2539 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); 2540 if (idx < 256) 2541 ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); 2542 else 2543 ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1); 2544 2545 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); 2546 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); 2547 tcamy = DMACH_G(val) << 32; 2548 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); 2549 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); 2550 tcam->lookup_type = DATALKPTYPE_G(data2); 2551 2552 /* 0 - Outer header, 1 - Inner header 2553 * [71:48] bit locations are overloaded for 2554 * outer vs. inner lookup types. 
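 * For an inner-header lookup those bits are reassembled below into the
 * VNI (vniy/vnix) and the DIP hit flag; for an outer-header lookup the
 * same bits supply the VLAN valid flag and the inner VLAN ID instead.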
2555 */ 2556 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { 2557 /* Inner header VNI */ 2558 tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); 2559 tcam->vniy = (tcam->vniy << 16) | VIDL_G(val); 2560 tcam->dip_hit = data2 & DATADIPHIT_F; 2561 } else { 2562 tcam->vlan_vld = data2 & DATAVIDH2_F; 2563 tcam->ivlan = VIDL_G(val); 2564 } 2565 2566 tcam->port_num = DATAPORTNUM_G(data2); 2567 2568 /* Read tcamx. Change the control param */ 2569 ctl |= CTLXYBITSEL_V(1); 2570 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); 2571 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); 2572 tcamx = DMACH_G(val) << 32; 2573 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); 2574 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); 2575 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { 2576 /* Inner header VNI mask */ 2577 tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); 2578 tcam->vnix = (tcam->vnix << 16) | VIDL_G(val); 2579 } 2580 } else { 2581 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx)); 2582 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx)); 2583 } 2584 2585 /* If no entry, return */ 2586 if (tcamx & tcamy) 2587 return rc; 2588 2589 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx)); 2590 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx)); 2591 2592 if (is_t5(padap->params.chip)) 2593 tcam->repli = (tcam->cls_lo & REPLICATE_F); 2594 else if (is_t6(padap->params.chip)) 2595 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F); 2596 2597 if (tcam->repli) { 2598 struct fw_ldst_cmd ldst_cmd; 2599 struct fw_ldst_mps_rplc mps_rplc; 2600 2601 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 2602 ldst_cmd.op_to_addrspace = 2603 htonl(FW_CMD_OP_V(FW_LDST_CMD) | 2604 FW_CMD_REQUEST_F | FW_CMD_READ_F | 2605 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS)); 2606 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); 2607 ldst_cmd.u.mps.rplc.fid_idx = 2608 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | 2609 FW_LDST_CMD_IDX_V(idx)); 2610 2611 /* If firmware is not attached/alive, use backdoor register 2612 * access to collect dump. 2613 */ 2614 if (is_fw_attached(pdbg_init)) 2615 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, 2616 sizeof(ldst_cmd), &ldst_cmd); 2617 2618 if (rc || !is_fw_attached(pdbg_init)) { 2619 cudbg_mps_rpl_backdoor(padap, &mps_rplc); 2620 /* Ignore error since we collected directly from 2621 * reading registers. 
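 * Clearing rc below keeps a failed (or skipped) FW_LDST mailbox read
 * from aborting MPS TCAM collection once cudbg_mps_rpl_backdoor() has
 * filled the replication map from the MPS_VF_RPLCT_MAP* registers.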
2622 */ 2623 rc = 0; 2624 } else { 2625 mps_rplc = ldst_cmd.u.mps.rplc; 2626 } 2627 2628 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0); 2629 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32); 2630 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64); 2631 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96); 2632 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) { 2633 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128); 2634 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160); 2635 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192); 2636 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224); 2637 } 2638 } 2639 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask); 2640 tcam->idx = idx; 2641 tcam->rplc_size = padap->params.arch.mps_rplc_size; 2642 return rc; 2643 } 2644 2645 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, 2646 struct cudbg_buffer *dbg_buff, 2647 struct cudbg_error *cudbg_err) 2648 { 2649 struct adapter *padap = pdbg_init->adap; 2650 struct cudbg_buffer temp_buff = { 0 }; 2651 u32 size = 0, i, n, total_size = 0; 2652 struct cudbg_mps_tcam *tcam; 2653 int rc; 2654 2655 n = padap->params.arch.mps_tcam_size; 2656 size = sizeof(struct cudbg_mps_tcam) * n; 2657 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2658 if (rc) 2659 return rc; 2660 2661 tcam = (struct cudbg_mps_tcam *)temp_buff.data; 2662 for (i = 0; i < n; i++) { 2663 rc = cudbg_collect_tcam_index(pdbg_init, tcam, i); 2664 if (rc) { 2665 cudbg_err->sys_err = rc; 2666 cudbg_put_buff(pdbg_init, &temp_buff); 2667 return rc; 2668 } 2669 total_size += sizeof(struct cudbg_mps_tcam); 2670 tcam++; 2671 } 2672 2673 if (!total_size) { 2674 rc = CUDBG_SYSTEM_ERROR; 2675 cudbg_err->sys_err = rc; 2676 cudbg_put_buff(pdbg_init, &temp_buff); 2677 return rc; 2678 } 2679 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2680 } 2681 2682 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, 2683 struct cudbg_buffer *dbg_buff, 2684 struct cudbg_error *cudbg_err) 2685 { 2686 struct adapter *padap = pdbg_init->adap; 2687 struct cudbg_buffer temp_buff = { 0 }; 2688 char vpd_str[CUDBG_VPD_VER_LEN + 1]; 2689 struct cudbg_vpd_data *vpd_data; 2690 struct vpd_params vpd = { 0 }; 2691 u32 vpd_vers, fw_vers; 2692 int rc; 2693 2694 rc = t4_get_raw_vpd_params(padap, &vpd); 2695 if (rc) 2696 return rc; 2697 2698 rc = t4_get_fw_version(padap, &fw_vers); 2699 if (rc) 2700 return rc; 2701 2702 rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN, 2703 vpd_str); 2704 if (rc) 2705 return rc; 2706 2707 vpd_str[CUDBG_VPD_VER_LEN] = '\0'; 2708 rc = kstrtouint(vpd_str, 0, &vpd_vers); 2709 if (rc) 2710 return rc; 2711 2712 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data), 2713 &temp_buff); 2714 if (rc) 2715 return rc; 2716 2717 vpd_data = (struct cudbg_vpd_data *)temp_buff.data; 2718 memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1); 2719 memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1); 2720 memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1); 2721 memcpy(vpd_data->mn, vpd.id, ID_LEN + 1); 2722 vpd_data->scfg_vers = t4_read_reg(padap, PCIE_STATIC_SPARE2_A); 2723 vpd_data->vpd_vers = vpd_vers; 2724 vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers); 2725 vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); 2726 vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); 2727 vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); 2728 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2729 } 2730 2731 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, 2732 struct cudbg_tid_data *tid_data) 2733 { 2734 struct adapter *padap = 
pdbg_init->adap; 2735 int i, cmd_retry = 8; 2736 u32 val; 2737 2738 /* Fill REQ_DATA regs with 0's */ 2739 for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++) 2740 t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0); 2741 2742 /* Write DBIG command */ 2743 val = DBGICMD_V(4) | DBGITID_V(tid); 2744 t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val); 2745 tid_data->dbig_cmd = val; 2746 2747 val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */ 2748 t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val); 2749 tid_data->dbig_conf = val; 2750 2751 /* Poll the DBGICMDBUSY bit */ 2752 val = 1; 2753 while (val) { 2754 val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A); 2755 val = val & DBGICMDBUSY_F; 2756 cmd_retry--; 2757 if (!cmd_retry) 2758 return CUDBG_SYSTEM_ERROR; 2759 } 2760 2761 /* Check RESP status */ 2762 val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A); 2763 tid_data->dbig_rsp_stat = val; 2764 if (!(val & 1)) 2765 return CUDBG_SYSTEM_ERROR; 2766 2767 /* Read RESP data */ 2768 for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++) 2769 tid_data->data[i] = t4_read_reg(padap, 2770 LE_DB_DBGI_RSP_DATA_A + 2771 (i << 2)); 2772 tid_data->tid = tid; 2773 return 0; 2774 } 2775 2776 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region) 2777 { 2778 int type = LE_ET_UNKNOWN; 2779 2780 if (tid < tcam_region.server_start) 2781 type = LE_ET_TCAM_CON; 2782 else if (tid < tcam_region.filter_start) 2783 type = LE_ET_TCAM_SERVER; 2784 else if (tid < tcam_region.clip_start) 2785 type = LE_ET_TCAM_FILTER; 2786 else if (tid < tcam_region.routing_start) 2787 type = LE_ET_TCAM_CLIP; 2788 else if (tid < tcam_region.tid_hash_base) 2789 type = LE_ET_TCAM_ROUTING; 2790 else if (tid < tcam_region.max_tid) 2791 type = LE_ET_HASH_CON; 2792 else 2793 type = LE_ET_INVALID_TID; 2794 2795 return type; 2796 } 2797 2798 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data, 2799 struct cudbg_tcam tcam_region) 2800 { 2801 int ipv6 = 0; 2802 int le_type; 2803 2804 le_type = cudbg_get_le_type(tid_data->tid, tcam_region); 2805 if (tid_data->tid & 1) 2806 return 0; 2807 2808 if (le_type == LE_ET_HASH_CON) { 2809 ipv6 = tid_data->data[16] & 0x8000; 2810 } else if (le_type == LE_ET_TCAM_CON) { 2811 ipv6 = tid_data->data[16] & 0x8000; 2812 if (ipv6) 2813 ipv6 = tid_data->data[9] == 0x00C00000; 2814 } else { 2815 ipv6 = 0; 2816 } 2817 return ipv6; 2818 } 2819 2820 void cudbg_fill_le_tcam_info(struct adapter *padap, 2821 struct cudbg_tcam *tcam_region) 2822 { 2823 u32 value; 2824 2825 /* Get the LE regions */ 2826 value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */ 2827 tcam_region->tid_hash_base = value; 2828 2829 /* Get routing table index */ 2830 value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); 2831 tcam_region->routing_start = value; 2832 2833 /* Get clip table index. 
For T6 there is separate CLIP TCAM */ 2834 if (is_t6(padap->params.chip)) 2835 value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A); 2836 else 2837 value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); 2838 tcam_region->clip_start = value; 2839 2840 /* Get filter table index */ 2841 value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A); 2842 tcam_region->filter_start = value; 2843 2844 /* Get server table index */ 2845 value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A); 2846 tcam_region->server_start = value; 2847 2848 /* Check whether hash is enabled and calculate the max tids */ 2849 value = t4_read_reg(padap, LE_DB_CONFIG_A); 2850 if ((value >> HASHEN_S) & 1) { 2851 value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A); 2852 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { 2853 tcam_region->max_tid = (value & 0xFFFFF) + 2854 tcam_region->tid_hash_base; 2855 } else { 2856 value = HASHTIDSIZE_G(value); 2857 value = 1 << value; 2858 tcam_region->max_tid = value + 2859 tcam_region->tid_hash_base; 2860 } 2861 } else { /* hash not enabled */ 2862 if (is_t6(padap->params.chip)) 2863 tcam_region->max_tid = (value & ASLIPCOMPEN_F) ? 2864 CUDBG_MAX_TID_COMP_EN : 2865 CUDBG_MAX_TID_COMP_DIS; 2866 else 2867 tcam_region->max_tid = CUDBG_MAX_TCAM_TID; 2868 } 2869 2870 if (is_t6(padap->params.chip)) 2871 tcam_region->max_tid += CUDBG_T6_CLIP; 2872 } 2873 2874 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, 2875 struct cudbg_buffer *dbg_buff, 2876 struct cudbg_error *cudbg_err) 2877 { 2878 struct adapter *padap = pdbg_init->adap; 2879 struct cudbg_buffer temp_buff = { 0 }; 2880 struct cudbg_tcam tcam_region = { 0 }; 2881 struct cudbg_tid_data *tid_data; 2882 u32 bytes = 0; 2883 int rc, size; 2884 u32 i; 2885 2886 cudbg_fill_le_tcam_info(padap, &tcam_region); 2887 2888 size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; 2889 size += sizeof(struct cudbg_tcam); 2890 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2891 if (rc) 2892 return rc; 2893 2894 memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); 2895 bytes = sizeof(struct cudbg_tcam); 2896 tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes); 2897 /* read all tid */ 2898 for (i = 0; i < tcam_region.max_tid; ) { 2899 rc = cudbg_read_tid(pdbg_init, i, tid_data); 2900 if (rc) { 2901 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 2902 /* Update tcam header and exit */ 2903 tcam_region.max_tid = i; 2904 memcpy(temp_buff.data, &tcam_region, 2905 sizeof(struct cudbg_tcam)); 2906 goto out; 2907 } 2908 2909 if (cudbg_is_ipv6_entry(tid_data, tcam_region)) { 2910 /* T6 CLIP TCAM: ipv6 takes 4 entries */ 2911 if (is_t6(padap->params.chip) && 2912 i >= tcam_region.clip_start && 2913 i < tcam_region.clip_start + CUDBG_T6_CLIP) 2914 i += 4; 2915 else /* Main TCAM: ipv6 takes two tids */ 2916 i += 2; 2917 } else { 2918 i++; 2919 } 2920 2921 tid_data++; 2922 bytes += sizeof(struct cudbg_tid_data); 2923 } 2924 2925 out: 2926 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2927 } 2928 2929 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, 2930 struct cudbg_buffer *dbg_buff, 2931 struct cudbg_error *cudbg_err) 2932 { 2933 struct adapter *padap = pdbg_init->adap; 2934 struct cudbg_buffer temp_buff = { 0 }; 2935 u32 size; 2936 int rc; 2937 2938 size = sizeof(u16) * NMTUS * NCCTRL_WIN; 2939 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2940 if (rc) 2941 return rc; 2942 2943 t4_read_cong_tbl(padap, (void *)temp_buff.data); 2944 return cudbg_write_and_release_buff(pdbg_init, 
&temp_buff, dbg_buff); 2945 } 2946 2947 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, 2948 struct cudbg_buffer *dbg_buff, 2949 struct cudbg_error *cudbg_err) 2950 { 2951 struct adapter *padap = pdbg_init->adap; 2952 struct cudbg_buffer temp_buff = { 0 }; 2953 struct ireg_buf *ma_indr; 2954 int i, rc, n; 2955 u32 size, j; 2956 2957 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) 2958 return CUDBG_STATUS_ENTITY_NOT_FOUND; 2959 2960 n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2961 size = sizeof(struct ireg_buf) * n * 2; 2962 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2963 if (rc) 2964 return rc; 2965 2966 ma_indr = (struct ireg_buf *)temp_buff.data; 2967 for (i = 0; i < n; i++) { 2968 struct ireg_field *ma_fli = &ma_indr->tp_pio; 2969 u32 *buff = ma_indr->outbuf; 2970 2971 ma_fli->ireg_addr = t6_ma_ireg_array[i][0]; 2972 ma_fli->ireg_data = t6_ma_ireg_array[i][1]; 2973 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2]; 2974 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3]; 2975 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data, 2976 buff, ma_fli->ireg_offset_range, 2977 ma_fli->ireg_local_offset); 2978 ma_indr++; 2979 } 2980 2981 n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32)); 2982 for (i = 0; i < n; i++) { 2983 struct ireg_field *ma_fli = &ma_indr->tp_pio; 2984 u32 *buff = ma_indr->outbuf; 2985 2986 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0]; 2987 ma_fli->ireg_data = t6_ma_ireg_array2[i][1]; 2988 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2]; 2989 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) { 2990 t4_read_indirect(padap, ma_fli->ireg_addr, 2991 ma_fli->ireg_data, buff, 1, 2992 ma_fli->ireg_local_offset); 2993 buff++; 2994 ma_fli->ireg_local_offset += 0x20; 2995 } 2996 ma_indr++; 2997 } 2998 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2999 } 3000 3001 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, 3002 struct cudbg_buffer *dbg_buff, 3003 struct cudbg_error *cudbg_err) 3004 { 3005 struct adapter *padap = pdbg_init->adap; 3006 struct cudbg_buffer temp_buff = { 0 }; 3007 struct cudbg_ulptx_la *ulptx_la_buff; 3008 struct cudbg_ver_hdr *ver_hdr; 3009 u32 i, j; 3010 int rc; 3011 3012 rc = cudbg_get_buff(pdbg_init, dbg_buff, 3013 sizeof(struct cudbg_ver_hdr) + 3014 sizeof(struct cudbg_ulptx_la), 3015 &temp_buff); 3016 if (rc) 3017 return rc; 3018 3019 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; 3020 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 3021 ver_hdr->revision = CUDBG_ULPTX_LA_REV; 3022 ver_hdr->size = sizeof(struct cudbg_ulptx_la); 3023 3024 ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + 3025 sizeof(*ver_hdr)); 3026 for (i = 0; i < CUDBG_NUM_ULPTX; i++) { 3027 ulptx_la_buff->rdptr[i] = t4_read_reg(padap, 3028 ULP_TX_LA_RDPTR_0_A + 3029 0x10 * i); 3030 ulptx_la_buff->wrptr[i] = t4_read_reg(padap, 3031 ULP_TX_LA_WRPTR_0_A + 3032 0x10 * i); 3033 ulptx_la_buff->rddata[i] = t4_read_reg(padap, 3034 ULP_TX_LA_RDDATA_0_A + 3035 0x10 * i); 3036 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) 3037 ulptx_la_buff->rd_data[i][j] = 3038 t4_read_reg(padap, 3039 ULP_TX_LA_RDDATA_0_A + 0x10 * i); 3040 } 3041 3042 for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { 3043 t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1); 3044 ulptx_la_buff->rdptr_asic[i] = 3045 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A); 3046 ulptx_la_buff->rddata_asic[i][0] = 3047 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A); 3048 ulptx_la_buff->rddata_asic[i][1] = 3049 t4_read_reg(padap, 
ULP_TX_ASIC_DEBUG_1_A); 3050 ulptx_la_buff->rddata_asic[i][2] = 3051 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A); 3052 ulptx_la_buff->rddata_asic[i][3] = 3053 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A); 3054 ulptx_la_buff->rddata_asic[i][4] = 3055 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A); 3056 ulptx_la_buff->rddata_asic[i][5] = 3057 t4_read_reg(padap, PM_RX_BASE_ADDR); 3058 } 3059 3060 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3061 } 3062 3063 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, 3064 struct cudbg_buffer *dbg_buff, 3065 struct cudbg_error *cudbg_err) 3066 { 3067 struct adapter *padap = pdbg_init->adap; 3068 struct cudbg_buffer temp_buff = { 0 }; 3069 u32 local_offset, local_range; 3070 struct ireg_buf *up_cim; 3071 u32 size, j, iter; 3072 u32 instance = 0; 3073 int i, rc, n; 3074 3075 if (is_t5(padap->params.chip)) 3076 n = sizeof(t5_up_cim_reg_array) / 3077 ((IREG_NUM_ELEM + 1) * sizeof(u32)); 3078 else if (is_t6(padap->params.chip)) 3079 n = sizeof(t6_up_cim_reg_array) / 3080 ((IREG_NUM_ELEM + 1) * sizeof(u32)); 3081 else 3082 return CUDBG_STATUS_NOT_IMPLEMENTED; 3083 3084 size = sizeof(struct ireg_buf) * n; 3085 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3086 if (rc) 3087 return rc; 3088 3089 up_cim = (struct ireg_buf *)temp_buff.data; 3090 for (i = 0; i < n; i++) { 3091 struct ireg_field *up_cim_reg = &up_cim->tp_pio; 3092 u32 *buff = up_cim->outbuf; 3093 3094 if (is_t5(padap->params.chip)) { 3095 up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0]; 3096 up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1]; 3097 up_cim_reg->ireg_local_offset = 3098 t5_up_cim_reg_array[i][2]; 3099 up_cim_reg->ireg_offset_range = 3100 t5_up_cim_reg_array[i][3]; 3101 instance = t5_up_cim_reg_array[i][4]; 3102 } else if (is_t6(padap->params.chip)) { 3103 up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; 3104 up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; 3105 up_cim_reg->ireg_local_offset = 3106 t6_up_cim_reg_array[i][2]; 3107 up_cim_reg->ireg_offset_range = 3108 t6_up_cim_reg_array[i][3]; 3109 instance = t6_up_cim_reg_array[i][4]; 3110 } 3111 3112 switch (instance) { 3113 case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: 3114 iter = up_cim_reg->ireg_offset_range; 3115 local_offset = 0x120; 3116 local_range = 1; 3117 break; 3118 case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: 3119 iter = up_cim_reg->ireg_offset_range; 3120 local_offset = 0x10; 3121 local_range = 1; 3122 break; 3123 default: 3124 iter = 1; 3125 local_offset = 0; 3126 local_range = up_cim_reg->ireg_offset_range; 3127 break; 3128 } 3129 3130 for (j = 0; j < iter; j++, buff++) { 3131 rc = t4_cim_read(padap, 3132 up_cim_reg->ireg_local_offset + 3133 (j * local_offset), local_range, buff); 3134 if (rc) { 3135 cudbg_put_buff(pdbg_init, &temp_buff); 3136 return rc; 3137 } 3138 } 3139 up_cim++; 3140 } 3141 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3142 } 3143 3144 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, 3145 struct cudbg_buffer *dbg_buff, 3146 struct cudbg_error *cudbg_err) 3147 { 3148 struct adapter *padap = pdbg_init->adap; 3149 struct cudbg_buffer temp_buff = { 0 }; 3150 struct cudbg_pbt_tables *pbt; 3151 int i, rc; 3152 u32 addr; 3153 3154 rc = cudbg_get_buff(pdbg_init, dbg_buff, 3155 sizeof(struct cudbg_pbt_tables), 3156 &temp_buff); 3157 if (rc) 3158 return rc; 3159 3160 pbt = (struct cudbg_pbt_tables *)temp_buff.data; 3161 /* PBT dynamic entries */ 3162 addr = CUDBG_CHAC_PBT_ADDR; 3163 for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; 
i++) { 3164 rc = t4_cim_read(padap, addr + (i * 4), 1, 3165 &pbt->pbt_dynamic[i]); 3166 if (rc) { 3167 cudbg_err->sys_err = rc; 3168 cudbg_put_buff(pdbg_init, &temp_buff); 3169 return rc; 3170 } 3171 } 3172 3173 /* PBT static entries */ 3174 /* static entries start when bit 6 is set */ 3175 addr = CUDBG_CHAC_PBT_ADDR + (1 << 6); 3176 for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) { 3177 rc = t4_cim_read(padap, addr + (i * 4), 1, 3178 &pbt->pbt_static[i]); 3179 if (rc) { 3180 cudbg_err->sys_err = rc; 3181 cudbg_put_buff(pdbg_init, &temp_buff); 3182 return rc; 3183 } 3184 } 3185 3186 /* LRF entries */ 3187 addr = CUDBG_CHAC_PBT_LRF; 3188 for (i = 0; i < CUDBG_LRF_ENTRIES; i++) { 3189 rc = t4_cim_read(padap, addr + (i * 4), 1, 3190 &pbt->lrf_table[i]); 3191 if (rc) { 3192 cudbg_err->sys_err = rc; 3193 cudbg_put_buff(pdbg_init, &temp_buff); 3194 return rc; 3195 } 3196 } 3197 3198 /* PBT data entries */ 3199 addr = CUDBG_CHAC_PBT_DATA; 3200 for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) { 3201 rc = t4_cim_read(padap, addr + (i * 4), 1, 3202 &pbt->pbt_data[i]); 3203 if (rc) { 3204 cudbg_err->sys_err = rc; 3205 cudbg_put_buff(pdbg_init, &temp_buff); 3206 return rc; 3207 } 3208 } 3209 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3210 } 3211 3212 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, 3213 struct cudbg_buffer *dbg_buff, 3214 struct cudbg_error *cudbg_err) 3215 { 3216 struct adapter *padap = pdbg_init->adap; 3217 struct cudbg_mbox_log *mboxlog = NULL; 3218 struct cudbg_buffer temp_buff = { 0 }; 3219 struct mbox_cmd_log *log = NULL; 3220 struct mbox_cmd *entry; 3221 unsigned int entry_idx; 3222 u16 mbox_cmds; 3223 int i, k, rc; 3224 u64 flit; 3225 u32 size; 3226 3227 log = padap->mbox_log; 3228 mbox_cmds = padap->mbox_log->size; 3229 size = sizeof(struct cudbg_mbox_log) * mbox_cmds; 3230 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3231 if (rc) 3232 return rc; 3233 3234 mboxlog = (struct cudbg_mbox_log *)temp_buff.data; 3235 for (k = 0; k < mbox_cmds; k++) { 3236 entry_idx = log->cursor + k; 3237 if (entry_idx >= log->size) 3238 entry_idx -= log->size; 3239 3240 entry = mbox_cmd_log_entry(log, entry_idx); 3241 /* skip over unused entries */ 3242 if (entry->timestamp == 0) 3243 continue; 3244 3245 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd)); 3246 for (i = 0; i < MBOX_LEN / 8; i++) { 3247 flit = entry->cmd[i]; 3248 mboxlog->hi[i] = (u32)(flit >> 32); 3249 mboxlog->lo[i] = (u32)flit; 3250 } 3251 mboxlog++; 3252 } 3253 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3254 } 3255 3256 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, 3257 struct cudbg_buffer *dbg_buff, 3258 struct cudbg_error *cudbg_err) 3259 { 3260 struct adapter *padap = pdbg_init->adap; 3261 struct cudbg_buffer temp_buff = { 0 }; 3262 struct ireg_buf *hma_indr; 3263 int i, rc, n; 3264 u32 size; 3265 3266 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) 3267 return CUDBG_STATUS_ENTITY_NOT_FOUND; 3268 3269 n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); 3270 size = sizeof(struct ireg_buf) * n; 3271 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3272 if (rc) 3273 return rc; 3274 3275 hma_indr = (struct ireg_buf *)temp_buff.data; 3276 for (i = 0; i < n; i++) { 3277 struct ireg_field *hma_fli = &hma_indr->tp_pio; 3278 u32 *buff = hma_indr->outbuf; 3279 3280 hma_fli->ireg_addr = t6_hma_ireg_array[i][0]; 3281 hma_fli->ireg_data = t6_hma_ireg_array[i][1]; 3282 hma_fli->ireg_local_offset = 
t6_hma_ireg_array[i][2]; 3283 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3]; 3284 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data, 3285 buff, hma_fli->ireg_offset_range, 3286 hma_fli->ireg_local_offset); 3287 hma_indr++; 3288 } 3289 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3290 } 3291 3292 void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, 3293 u32 *num, u32 *size) 3294 { 3295 u32 tot_entries = 0, tot_size = 0; 3296 3297 /* NIC TXQ, RXQ, FLQ, and CTRLQ */ 3298 tot_entries += MAX_ETH_QSETS * 3; 3299 tot_entries += MAX_CTRL_QUEUES; 3300 3301 tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3302 tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; 3303 tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; 3304 tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * 3305 MAX_CTRL_TXQ_DESC_SIZE; 3306 3307 /* FW_EVTQ and INTRQ */ 3308 tot_entries += INGQ_EXTRAS; 3309 tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; 3310 3311 /* PTP_TXQ */ 3312 tot_entries += 1; 3313 tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3314 3315 /* ULD TXQ, RXQ, and FLQ */ 3316 tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; 3317 tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; 3318 3319 tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * 3320 MAX_TXQ_DESC_SIZE; 3321 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * 3322 MAX_RXQ_DESC_SIZE; 3323 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * 3324 MAX_FL_DESC_SIZE; 3325 3326 /* ULD CIQ */ 3327 tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; 3328 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * 3329 MAX_RXQ_DESC_SIZE; 3330 3331 /* ETHOFLD TXQ, RXQ, and FLQ */ 3332 tot_entries += MAX_OFLD_QSETS * 3; 3333 tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3334 3335 tot_size += sizeof(struct cudbg_ver_hdr) + 3336 sizeof(struct cudbg_qdesc_info) + 3337 sizeof(struct cudbg_qdesc_entry) * tot_entries; 3338 3339 if (num) 3340 *num = tot_entries; 3341 3342 if (size) 3343 *size = tot_size; 3344 } 3345 3346 int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, 3347 struct cudbg_buffer *dbg_buff, 3348 struct cudbg_error *cudbg_err) 3349 { 3350 u32 num_queues = 0, tot_entries = 0, size = 0; 3351 struct adapter *padap = pdbg_init->adap; 3352 struct cudbg_buffer temp_buff = { 0 }; 3353 struct cudbg_qdesc_entry *qdesc_entry; 3354 struct cudbg_qdesc_info *qdesc_info; 3355 struct cudbg_ver_hdr *ver_hdr; 3356 struct sge *s = &padap->sge; 3357 u32 i, j, cur_off, tot_len; 3358 u8 *data; 3359 int rc; 3360 3361 cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size); 3362 size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); 3363 tot_len = size; 3364 data = kvzalloc(size, GFP_KERNEL); 3365 if (!data) 3366 return -ENOMEM; 3367 3368 ver_hdr = (struct cudbg_ver_hdr *)data; 3369 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 3370 ver_hdr->revision = CUDBG_QDESC_REV; 3371 ver_hdr->size = sizeof(struct cudbg_qdesc_info); 3372 size -= sizeof(*ver_hdr); 3373 3374 qdesc_info = (struct cudbg_qdesc_info *)(data + 3375 sizeof(*ver_hdr)); 3376 size -= sizeof(*qdesc_info); 3377 qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; 3378 3379 #define QDESC_GET(q, desc, type, label) do { \ 3380 if (size <= 0) { \ 3381 goto label; \ 3382 } \ 3383 if (desc) { \ 3384 cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ 3385 size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ 3386 num_queues++; \ 3387 qdesc_entry = 
cudbg_next_qdesc(qdesc_entry); \ 3388 } \ 3389 } while (0) 3390 3391 #define QDESC_GET_TXQ(q, type, label) do { \ 3392 struct sge_txq *txq = (struct sge_txq *)q; \ 3393 QDESC_GET(txq, txq->desc, type, label); \ 3394 } while (0) 3395 3396 #define QDESC_GET_RXQ(q, type, label) do { \ 3397 struct sge_rspq *rxq = (struct sge_rspq *)q; \ 3398 QDESC_GET(rxq, rxq->desc, type, label); \ 3399 } while (0) 3400 3401 #define QDESC_GET_FLQ(q, type, label) do { \ 3402 struct sge_fl *flq = (struct sge_fl *)q; \ 3403 QDESC_GET(flq, flq->desc, type, label); \ 3404 } while (0) 3405 3406 /* NIC TXQ */ 3407 for (i = 0; i < s->ethqsets; i++) 3408 QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); 3409 3410 /* NIC RXQ */ 3411 for (i = 0; i < s->ethqsets; i++) 3412 QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); 3413 3414 /* NIC FLQ */ 3415 for (i = 0; i < s->ethqsets; i++) 3416 QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); 3417 3418 /* NIC CTRLQ */ 3419 for (i = 0; i < padap->params.nports; i++) 3420 QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); 3421 3422 /* FW_EVTQ */ 3423 QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); 3424 3425 /* INTRQ */ 3426 QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); 3427 3428 /* PTP_TXQ */ 3429 QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); 3430 3431 /* ULD Queues */ 3432 mutex_lock(&uld_mutex); 3433 3434 if (s->uld_txq_info) { 3435 struct sge_uld_txq_info *utxq; 3436 3437 /* ULD TXQ */ 3438 for (j = 0; j < CXGB4_TX_MAX; j++) { 3439 if (!s->uld_txq_info[j]) 3440 continue; 3441 3442 utxq = s->uld_txq_info[j]; 3443 for (i = 0; i < utxq->ntxq; i++) 3444 QDESC_GET_TXQ(&utxq->uldtxq[i].q, 3445 cudbg_uld_txq_to_qtype(j), 3446 out_unlock); 3447 } 3448 } 3449 3450 if (s->uld_rxq_info) { 3451 struct sge_uld_rxq_info *urxq; 3452 u32 base; 3453 3454 /* ULD RXQ */ 3455 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3456 if (!s->uld_rxq_info[j]) 3457 continue; 3458 3459 urxq = s->uld_rxq_info[j]; 3460 for (i = 0; i < urxq->nrxq; i++) 3461 QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, 3462 cudbg_uld_rxq_to_qtype(j), 3463 out_unlock); 3464 } 3465 3466 /* ULD FLQ */ 3467 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3468 if (!s->uld_rxq_info[j]) 3469 continue; 3470 3471 urxq = s->uld_rxq_info[j]; 3472 for (i = 0; i < urxq->nrxq; i++) 3473 QDESC_GET_FLQ(&urxq->uldrxq[i].fl, 3474 cudbg_uld_flq_to_qtype(j), 3475 out_unlock); 3476 } 3477 3478 /* ULD CIQ */ 3479 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3480 if (!s->uld_rxq_info[j]) 3481 continue; 3482 3483 urxq = s->uld_rxq_info[j]; 3484 base = urxq->nrxq; 3485 for (i = 0; i < urxq->nciq; i++) 3486 QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, 3487 cudbg_uld_ciq_to_qtype(j), 3488 out_unlock); 3489 } 3490 } 3491 3492 /* ETHOFLD TXQ */ 3493 if (s->eohw_txq) 3494 for (i = 0; i < s->eoqsets; i++) 3495 QDESC_GET_TXQ(&s->eohw_txq[i].q, 3496 CUDBG_QTYPE_ETHOFLD_TXQ, out); 3497 3498 /* ETHOFLD RXQ and FLQ */ 3499 if (s->eohw_rxq) { 3500 for (i = 0; i < s->eoqsets; i++) 3501 QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, 3502 CUDBG_QTYPE_ETHOFLD_RXQ, out); 3503 3504 for (i = 0; i < s->eoqsets; i++) 3505 QDESC_GET_FLQ(&s->eohw_rxq[i].fl, 3506 CUDBG_QTYPE_ETHOFLD_FLQ, out); 3507 } 3508 3509 out_unlock: 3510 mutex_unlock(&uld_mutex); 3511 3512 out: 3513 qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); 3514 qdesc_info->num_queues = num_queues; 3515 cur_off = 0; 3516 while (tot_len) { 3517 u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); 3518 3519 rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size, 3520 &temp_buff); 3521 if (rc) { 3522 
cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3523 goto out_free; 3524 } 3525 3526 memcpy(temp_buff.data, data + cur_off, chunk_size); 3527 tot_len -= chunk_size; 3528 cur_off += chunk_size; 3529 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, 3530 dbg_buff); 3531 if (rc) { 3532 cudbg_put_buff(pdbg_init, &temp_buff); 3533 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3534 goto out_free; 3535 } 3536 } 3537 3538 out_free: 3539 if (data) 3540 kvfree(data); 3541 3542 #undef QDESC_GET_FLQ 3543 #undef QDESC_GET_RXQ 3544 #undef QDESC_GET_TXQ 3545 #undef QDESC_GET 3546 3547 return rc; 3548 } 3549 3550 int cudbg_collect_flash(struct cudbg_init *pdbg_init, 3551 struct cudbg_buffer *dbg_buff, 3552 struct cudbg_error *cudbg_err) 3553 { 3554 struct adapter *padap = pdbg_init->adap; 3555 u32 count = padap->params.sf_size, n; 3556 struct cudbg_buffer temp_buff = {0}; 3557 u32 addr, i; 3558 int rc; 3559 3560 addr = FLASH_EXP_ROM_START; 3561 3562 for (i = 0; i < count; i += SF_PAGE_SIZE) { 3563 n = min_t(u32, count - i, SF_PAGE_SIZE); 3564 3565 rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff); 3566 if (rc) { 3567 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3568 goto out; 3569 } 3570 rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0); 3571 if (rc) 3572 goto out; 3573 3574 addr += (n * 4); 3575 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, 3576 dbg_buff); 3577 if (rc) { 3578 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3579 goto out; 3580 } 3581 } 3582 3583 out: 3584 return rc; 3585 } 3586
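/* Illustrative sketch only -- not part of the driver.  Most collectors
 * in this file follow the same pattern: reserve space with
 * cudbg_get_buff(), fill the typed payload, then hand it back with
 * cudbg_write_and_release_buff(), releasing via cudbg_put_buff() on any
 * intermediate failure.  A minimal, hypothetical collector (entity and
 * payload invented purely for illustration) would look like:
 *
 *	int cudbg_collect_example(struct cudbg_init *pdbg_init,
 *				  struct cudbg_buffer *dbg_buff,
 *				  struct cudbg_error *cudbg_err)
 *	{
 *		struct adapter *padap = pdbg_init->adap;
 *		struct cudbg_buffer temp_buff = { 0 };
 *		u32 *data;
 *		int rc;
 *
 *		rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(u32),
 *				    &temp_buff);
 *		if (rc)
 *			return rc;
 *
 *		data = (u32 *)temp_buff.data;
 *		*data = t4_read_reg(padap, LE_DB_CONFIG_A);
 *		return cudbg_write_and_release_buff(pdbg_init, &temp_buff,
 *						    dbg_buff);
 *	}
 */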