// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 */

#include <linux/sort.h>
#include <linux/string.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
	{0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
	{0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
	{0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
};

static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
	{0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
	{0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
	{0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
	{0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
};

static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 21},
	{0x7e50, 0x7e54, 0x30, 32},
	{0x7e50, 0x7e54, 0x50, 22},
	{0x7e50, 0x7e54, 0x68, 12}
};

static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 8},
	{0x7e50, 0x7e54, 0x20, 13},
	{0x7e50, 0x7e54, 0x30, 16},
	{0x7e50, 0x7e54, 0x40, 16},
	{0x7e50, 0x7e54, 0x50, 16},
	{0x7e50, 0x7e54, 0x60, 6},
	{0x7e50, 0x7e54, 0x68, 4}
};

static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
	{0x10cc, 0x10d0, 0x0, 16},
	{0x10cc, 0x10d4, 0x0, 16},
};

static const u32 t6_sge_qbase_index_array[] = {
	/* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
	0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
};

static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
	{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
	{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
	{0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
};

static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
	{0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
	{0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
};

static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
	{0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
	{0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
};

static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
	{0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};

static const u32 t5_pcie_config_array[][2] = {
	{0x0, 0x34},
	{0x3c, 0x40},
	{0x50, 0x64},
	{0x70, 0x80},
	{0x94, 0xa0},
	{0xb0, 0xb8},
	{0xd0, 0xd4},
	{0x100, 0x128},
	{0x140, 0x148},
	{0x150, 0x164},
	{0x170, 0x178},
	{0x180, 0x194},
	{0x1a0, 0x1b8},
	{0x1c0, 0x208},
};

static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
	{0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
};

static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};

static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
};

static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
};

static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
	{0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
};

u32 cudbg_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
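			/* Note: each row of these ireg tables holds
			 * IREG_NUM_ELEM u32s (addr reg, data reg, local
			 * offset, count), so dividing the byte total by
			 * IREG_NUM_ELEM * sizeof(u32) below yields the
			 * number of indirect register ranges.
			 */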
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_left = pin_buff->size;
	bytes_read = 0;
	while (bytes_left > 0) {
		/* Do compression in smaller chunks */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;
		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}

static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}

static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

static int cudbg_meminfo_get_mem_index(struct adapter *padap,
				       struct cudbg_meminfo *mem_info,
				       u8 mem_type, u8 *idx)
{
	u8 i, flag;

	switch (mem_type) {
	case MEM_EDC0:
		flag = EDC0_FLAG;
		break;
	case MEM_EDC1:
		flag = EDC1_FLAG;
		break;
	case MEM_MC0:
		/* Some T5 cards have both MC0 and MC1. */
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
		break;
	case MEM_MC1:
		flag = MC1_FLAG;
		break;
	case MEM_HMA:
		flag = HMA_FLAG;
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	for (i = 0; i < mem_info->avail_c; i++) {
		if (mem_info->avail[i].idx == flag) {
			*idx = i;
			return 0;
		}
	}

	return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

/* Fetch the @region_name's start and end from @meminfo. */
static int cudbg_get_mem_region(struct adapter *padap,
				struct cudbg_meminfo *meminfo,
				u8 mem_type, const char *region_name,
				struct cudbg_mem_desc *mem_desc)
{
	u8 mc, found = 0;
	u32 idx = 0;
	int rc, i;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
	if (rc)
		return rc;

	i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
	if (i < 0)
		return -EINVAL;

	idx = i;
	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* Skip holes */

		if (!(meminfo->mem[i].limit))
			meminfo->mem[i].limit =
				i < meminfo->mem_c - 1 ?
				meminfo->mem[i + 1].base - 1 : ~0;

		if (meminfo->mem[i].idx == idx) {
			/* Check if the region exists in @mem_type memory */
			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
			    meminfo->mem[i].limit < meminfo->avail[mc].base)
				return -EINVAL;

			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
				return -EINVAL;

			memcpy(mem_desc, &meminfo->mem[i],
			       sizeof(struct cudbg_mem_desc));
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	return 0;
}

/* Fetch and update the start and end of the requested memory region w.r.t 0
 * in the corresponding EDC/MC/HMA.
 */
static int cudbg_get_mem_relative(struct adapter *padap,
				  struct cudbg_meminfo *meminfo,
				  u8 mem_type, u32 *out_base, u32 *out_end)
{
	u8 mc_idx;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
	if (rc)
		return rc;

	if (*out_base < meminfo->avail[mc_idx].base)
		*out_base = 0;
	else
		*out_base -= meminfo->avail[mc_idx].base;

	if (*out_end > meminfo->avail[mc_idx].limit)
		*out_end = meminfo->avail[mc_idx].limit;
	else
		*out_end -= meminfo->avail[mc_idx].base;

	return 0;
}

/* Get TX and RX Payload region */
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
				   const char *region_name,
				   struct cudbg_region_info *payload)
{
	struct cudbg_mem_desc mem_desc = { 0 };
	struct cudbg_meminfo meminfo;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
	if (rc) {
		payload->exist = false;
		return 0;
	}

	payload->exist = true;
	payload->start = mem_desc.base;
	payload->end = mem_desc.limit;

	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
}

static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
			     int mtype, u32 addr, u32 len, void *hbuf)
{
	u32 win_pf, memoffset, mem_aperture, mem_base;
	struct adapter *adap = pdbg_init->adap;
	u32 pos, offset, resid;
	u32 *res_buf;
	u64 *buf;
	int ret;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;

	buf = (u64 *)hbuf;

	/* Try to do 64-bit reads. Residual will be handled later. */
	resid = len & 0x7;
	len -= resid;

	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	addr = addr + memoffset;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data from the adapter */
	while (len > 0) {
		*buf++ = le64_to_cpu((__force __le64)
				     t4_read_reg64(adap, mem_base + offset));
		offset += sizeof(u64);
		len -= sizeof(u64);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	res_buf = (u32 *)buf;
	/* Read residual in 32-bit multiples */
	while (resid > sizeof(u32)) {
		*res_buf++ = le32_to_cpu((__force __le32)
					 t4_read_reg(adap, mem_base + offset));
		offset += sizeof(u32);
		resid -= sizeof(u32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* Transfer residual < 32-bits */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);

	return 0;
}

#define CUDBG_YIELD_ITERATION 256

static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_region_info payload[2];
	u32 yield_count = 0;
	int rc = 0;
	u8 i;

	/* Get TX/RX Payload region range if they exist */
	memset(payload, 0, sizeof(payload));
	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
		if (rc)
			return rc;

		if (payload[i].exist) {
			/* Align start and end to avoid wrap around */
			payload[i].start = roundup(payload[i].start,
						   CUDBG_CHUNK_SIZE);
			payload[i].end = rounddown(payload[i].end,
						   CUDBG_CHUNK_SIZE);
		}
	}

	bytes_left = tot_len;
	while (bytes_left > 0) {
		/* As MC size is huge and read through PIO access, this
		 * loop will hold the cpu for a long time. The OS may think
		 * that the process is hung and will generate CPU stall
		 * traces. So yield the cpu regularly.
		 */
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;

		for (i = 0; i < ARRAY_SIZE(payload); i++)
			if (payload[i].exist &&
			    bytes_read >= payload[i].start &&
			    bytes_read + bytes <= payload[i].end)
				/* TX and RX Payload regions can't overlap */
				goto skip_read;

		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}

skip_read:
		bytes_left -= bytes;
		bytes_read += bytes;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
				 struct cudbg_error *cudbg_err,
				 u8 mem_type, unsigned long *region_size)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_meminfo mem_info;
	u8 mc_idx;
	int rc;

	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(padap, &mem_info);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	if (region_size)
		*region_size = mem_info.avail[mc_idx].limit -
			       mem_info.avail[mc_idx].base;

	return 0;
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	unsigned long size = 0;
	int rc;

	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
	if (rc)
		return rc;

	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc, nentries;

	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);

	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
					      struct sge_qbase_reg_field *qbase,
					      u32 func, bool is_pf)
{
	u32 *buff, i;

	if (is_pf) {
		buff = qbase->pf_data_value[func];
	} else {
		buff = qbase->vf_data_value[func];
		/* In SGE_QBASE_INDEX,
		 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
		 */
		func += 8;
	}

	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct sge_qbase_reg_field *sge_qbase;
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
			    &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}

	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
		 * SGE_QBASE_MAP[0-3]
		 */
		sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
		for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
			sge_qbase->reg_data[i] =
				t6_sge_qbase_index_array[i + 1];

		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, true);

		for (i = 0; i < padap->params.arch.vfcount; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, false);

		sge_qbase->vfcount = padap->params.arch.vfcount;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
pdbg_init->adap; 1852 struct cudbg_buffer temp_buff = { 0 }; 1853 struct cudbg_ulprx_la *ulprx_la_buff; 1854 int rc; 1855 1856 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la), 1857 &temp_buff); 1858 if (rc) 1859 return rc; 1860 1861 ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data; 1862 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data); 1863 ulprx_la_buff->size = ULPRX_LA_SIZE; 1864 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1865 } 1866 1867 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, 1868 struct cudbg_buffer *dbg_buff, 1869 struct cudbg_error *cudbg_err) 1870 { 1871 struct adapter *padap = pdbg_init->adap; 1872 struct cudbg_buffer temp_buff = { 0 }; 1873 struct cudbg_tp_la *tp_la_buff; 1874 int size, rc; 1875 1876 size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); 1877 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 1878 if (rc) 1879 return rc; 1880 1881 tp_la_buff = (struct cudbg_tp_la *)temp_buff.data; 1882 tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A)); 1883 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL); 1884 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1885 } 1886 1887 int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, 1888 struct cudbg_buffer *dbg_buff, 1889 struct cudbg_error *cudbg_err) 1890 { 1891 struct adapter *padap = pdbg_init->adap; 1892 struct cudbg_buffer temp_buff = { 0 }; 1893 struct cudbg_meminfo *meminfo_buff; 1894 struct cudbg_ver_hdr *ver_hdr; 1895 int rc; 1896 1897 rc = cudbg_get_buff(pdbg_init, dbg_buff, 1898 sizeof(struct cudbg_ver_hdr) + 1899 sizeof(struct cudbg_meminfo), 1900 &temp_buff); 1901 if (rc) 1902 return rc; 1903 1904 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; 1905 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 1906 ver_hdr->revision = CUDBG_MEMINFO_REV; 1907 ver_hdr->size = sizeof(struct cudbg_meminfo); 1908 1909 meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data + 1910 sizeof(*ver_hdr)); 1911 rc = cudbg_fill_meminfo(padap, meminfo_buff); 1912 if (rc) { 1913 cudbg_err->sys_err = rc; 1914 cudbg_put_buff(pdbg_init, &temp_buff); 1915 return rc; 1916 } 1917 1918 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1919 } 1920 1921 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, 1922 struct cudbg_buffer *dbg_buff, 1923 struct cudbg_error *cudbg_err) 1924 { 1925 struct cudbg_cim_pif_la *cim_pif_la_buff; 1926 struct adapter *padap = pdbg_init->adap; 1927 struct cudbg_buffer temp_buff = { 0 }; 1928 int size, rc; 1929 1930 size = sizeof(struct cudbg_cim_pif_la) + 1931 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); 1932 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 1933 if (rc) 1934 return rc; 1935 1936 cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data; 1937 cim_pif_la_buff->size = CIM_PIFLA_SIZE; 1938 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data, 1939 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE, 1940 NULL, NULL); 1941 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1942 } 1943 1944 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, 1945 struct cudbg_buffer *dbg_buff, 1946 struct cudbg_error *cudbg_err) 1947 { 1948 struct adapter *padap = pdbg_init->adap; 1949 struct cudbg_buffer temp_buff = { 0 }; 1950 struct cudbg_clk_info *clk_info_buff; 1951 u64 tp_tick_us; 1952 int rc; 1953 1954 if (!padap->params.vpd.cclk) 1955 return CUDBG_STATUS_CCLK_NOT_DEFINED; 1956 1957 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct 
cudbg_clk_info), 1958 &temp_buff); 1959 if (rc) 1960 return rc; 1961 1962 clk_info_buff = (struct cudbg_clk_info *)temp_buff.data; 1963 clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */ 1964 clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A); 1965 clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res); 1966 clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res); 1967 tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000; 1968 1969 clk_info_buff->dack_timer = 1970 (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 * 1971 t4_read_reg(padap, TP_DACK_TIMER_A); 1972 clk_info_buff->retransmit_min = 1973 tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A); 1974 clk_info_buff->retransmit_max = 1975 tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A); 1976 clk_info_buff->persist_timer_min = 1977 tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A); 1978 clk_info_buff->persist_timer_max = 1979 tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A); 1980 clk_info_buff->keepalive_idle_timer = 1981 tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A); 1982 clk_info_buff->keepalive_interval = 1983 tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A); 1984 clk_info_buff->initial_srtt = 1985 tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A)); 1986 clk_info_buff->finwait2_timer = 1987 tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A); 1988 1989 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 1990 } 1991 1992 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, 1993 struct cudbg_buffer *dbg_buff, 1994 struct cudbg_error *cudbg_err) 1995 { 1996 struct adapter *padap = pdbg_init->adap; 1997 struct cudbg_buffer temp_buff = { 0 }; 1998 struct ireg_buf *ch_pcie; 1999 int i, rc, n; 2000 u32 size; 2001 2002 n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2003 size = sizeof(struct ireg_buf) * n * 2; 2004 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2005 if (rc) 2006 return rc; 2007 2008 ch_pcie = (struct ireg_buf *)temp_buff.data; 2009 /* PCIE_PDBG */ 2010 for (i = 0; i < n; i++) { 2011 struct ireg_field *pcie_pio = &ch_pcie->tp_pio; 2012 u32 *buff = ch_pcie->outbuf; 2013 2014 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0]; 2015 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1]; 2016 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2]; 2017 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3]; 2018 t4_read_indirect(padap, 2019 pcie_pio->ireg_addr, 2020 pcie_pio->ireg_data, 2021 buff, 2022 pcie_pio->ireg_offset_range, 2023 pcie_pio->ireg_local_offset); 2024 ch_pcie++; 2025 } 2026 2027 /* PCIE_CDBG */ 2028 n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2029 for (i = 0; i < n; i++) { 2030 struct ireg_field *pcie_pio = &ch_pcie->tp_pio; 2031 u32 *buff = ch_pcie->outbuf; 2032 2033 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0]; 2034 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1]; 2035 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2]; 2036 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3]; 2037 t4_read_indirect(padap, 2038 pcie_pio->ireg_addr, 2039 pcie_pio->ireg_data, 2040 buff, 2041 pcie_pio->ireg_offset_range, 2042 pcie_pio->ireg_local_offset); 2043 ch_pcie++; 2044 } 2045 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2046 } 2047 2048 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, 2049 struct cudbg_buffer *dbg_buff, 2050 struct cudbg_error *cudbg_err) 2051 { 2052 struct adapter *padap = pdbg_init->adap; 2053 struct cudbg_buffer temp_buff 
= { 0 }; 2054 struct ireg_buf *ch_pm; 2055 int i, rc, n; 2056 u32 size; 2057 2058 n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); 2059 size = sizeof(struct ireg_buf) * n * 2; 2060 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2061 if (rc) 2062 return rc; 2063 2064 ch_pm = (struct ireg_buf *)temp_buff.data; 2065 /* PM_RX */ 2066 for (i = 0; i < n; i++) { 2067 struct ireg_field *pm_pio = &ch_pm->tp_pio; 2068 u32 *buff = ch_pm->outbuf; 2069 2070 pm_pio->ireg_addr = t5_pm_rx_array[i][0]; 2071 pm_pio->ireg_data = t5_pm_rx_array[i][1]; 2072 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2]; 2073 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3]; 2074 t4_read_indirect(padap, 2075 pm_pio->ireg_addr, 2076 pm_pio->ireg_data, 2077 buff, 2078 pm_pio->ireg_offset_range, 2079 pm_pio->ireg_local_offset); 2080 ch_pm++; 2081 } 2082 2083 /* PM_TX */ 2084 n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32)); 2085 for (i = 0; i < n; i++) { 2086 struct ireg_field *pm_pio = &ch_pm->tp_pio; 2087 u32 *buff = ch_pm->outbuf; 2088 2089 pm_pio->ireg_addr = t5_pm_tx_array[i][0]; 2090 pm_pio->ireg_data = t5_pm_tx_array[i][1]; 2091 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2]; 2092 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3]; 2093 t4_read_indirect(padap, 2094 pm_pio->ireg_addr, 2095 pm_pio->ireg_data, 2096 buff, 2097 pm_pio->ireg_offset_range, 2098 pm_pio->ireg_local_offset); 2099 ch_pm++; 2100 } 2101 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2102 } 2103 2104 int cudbg_collect_tid(struct cudbg_init *pdbg_init, 2105 struct cudbg_buffer *dbg_buff, 2106 struct cudbg_error *cudbg_err) 2107 { 2108 struct adapter *padap = pdbg_init->adap; 2109 struct cudbg_tid_info_region_rev1 *tid1; 2110 struct cudbg_buffer temp_buff = { 0 }; 2111 struct cudbg_tid_info_region *tid; 2112 u32 para[2], val[2]; 2113 int rc; 2114 2115 rc = cudbg_get_buff(pdbg_init, dbg_buff, 2116 sizeof(struct cudbg_tid_info_region_rev1), 2117 &temp_buff); 2118 if (rc) 2119 return rc; 2120 2121 tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data; 2122 tid = &tid1->tid; 2123 tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; 2124 tid1->ver_hdr.revision = CUDBG_TID_INFO_REV; 2125 tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) - 2126 sizeof(struct cudbg_ver_hdr); 2127 2128 /* If firmware is not attached/alive, use backdoor register 2129 * access to collect dump. 
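 * In that case the ETHOFLD and HPFILTER TID ranges normally queried
 * from firmware are not collected; only the driver-cached tids info
 * and the LE_DB registers read below are filled in.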
2130 */ 2131 if (!is_fw_attached(pdbg_init)) 2132 goto fill_tid; 2133 2134 #define FW_PARAM_PFVF_A(param) \ 2135 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ 2136 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ 2137 FW_PARAMS_PARAM_Y_V(0) | \ 2138 FW_PARAMS_PARAM_Z_V(0)) 2139 2140 para[0] = FW_PARAM_PFVF_A(ETHOFLD_START); 2141 para[1] = FW_PARAM_PFVF_A(ETHOFLD_END); 2142 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val); 2143 if (rc < 0) { 2144 cudbg_err->sys_err = rc; 2145 cudbg_put_buff(pdbg_init, &temp_buff); 2146 return rc; 2147 } 2148 tid->uotid_base = val[0]; 2149 tid->nuotids = val[1] - val[0] + 1; 2150 2151 if (is_t5(padap->params.chip)) { 2152 tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4; 2153 } else if (is_t6(padap->params.chip)) { 2154 tid1->tid_start = 2155 t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A); 2156 tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A); 2157 2158 para[0] = FW_PARAM_PFVF_A(HPFILTER_START); 2159 para[1] = FW_PARAM_PFVF_A(HPFILTER_END); 2160 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, 2161 para, val); 2162 if (rc < 0) { 2163 cudbg_err->sys_err = rc; 2164 cudbg_put_buff(pdbg_init, &temp_buff); 2165 return rc; 2166 } 2167 tid->hpftid_base = val[0]; 2168 tid->nhpftids = val[1] - val[0] + 1; 2169 } 2170 2171 #undef FW_PARAM_PFVF_A 2172 2173 fill_tid: 2174 tid->ntids = padap->tids.ntids; 2175 tid->nstids = padap->tids.nstids; 2176 tid->stid_base = padap->tids.stid_base; 2177 tid->hash_base = padap->tids.hash_base; 2178 2179 tid->natids = padap->tids.natids; 2180 tid->nftids = padap->tids.nftids; 2181 tid->ftid_base = padap->tids.ftid_base; 2182 tid->aftid_base = padap->tids.aftid_base; 2183 tid->aftid_end = padap->tids.aftid_end; 2184 2185 tid->sftid_base = padap->tids.sftid_base; 2186 tid->nsftids = padap->tids.nsftids; 2187 2188 tid->flags = padap->flags; 2189 tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A); 2190 tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A); 2191 tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A); 2192 2193 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2194 } 2195 2196 int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, 2197 struct cudbg_buffer *dbg_buff, 2198 struct cudbg_error *cudbg_err) 2199 { 2200 struct adapter *padap = pdbg_init->adap; 2201 struct cudbg_buffer temp_buff = { 0 }; 2202 u32 size, *value, j; 2203 int i, rc, n; 2204 2205 size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; 2206 n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); 2207 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2208 if (rc) 2209 return rc; 2210 2211 value = (u32 *)temp_buff.data; 2212 for (i = 0; i < n; i++) { 2213 for (j = t5_pcie_config_array[i][0]; 2214 j <= t5_pcie_config_array[i][1]; j += 4) { 2215 t4_hw_pci_read_cfg4(padap, j, value); 2216 value++; 2217 } 2218 } 2219 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2220 } 2221 2222 static int cudbg_sge_ctxt_check_valid(u32 *buf, int type) 2223 { 2224 int index, bit, bit_pos = 0; 2225 2226 switch (type) { 2227 case CTXT_EGRESS: 2228 bit_pos = 176; 2229 break; 2230 case CTXT_INGRESS: 2231 bit_pos = 141; 2232 break; 2233 case CTXT_FLM: 2234 bit_pos = 89; 2235 break; 2236 } 2237 index = bit_pos / 32; 2238 bit = bit_pos % 32; 2239 return buf[index] & (1U << bit); 2240 } 2241 2242 static int cudbg_get_ctxt_region_info(struct adapter *padap, 2243 struct cudbg_region_info *ctx_info, 2244 u8 *mem_type) 2245 { 2246 struct cudbg_mem_desc mem_desc; 2247 struct 
cudbg_meminfo meminfo; 2248 u32 i, j, value, found; 2249 u8 flq; 2250 int rc; 2251 2252 rc = cudbg_fill_meminfo(padap, &meminfo); 2253 if (rc) 2254 return rc; 2255 2256 /* Get EGRESS and INGRESS context region size */ 2257 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { 2258 found = 0; 2259 memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc)); 2260 for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) { 2261 rc = cudbg_get_mem_region(padap, &meminfo, j, 2262 cudbg_region[i], 2263 &mem_desc); 2264 if (!rc) { 2265 found = 1; 2266 rc = cudbg_get_mem_relative(padap, &meminfo, j, 2267 &mem_desc.base, 2268 &mem_desc.limit); 2269 if (rc) { 2270 ctx_info[i].exist = false; 2271 break; 2272 } 2273 ctx_info[i].exist = true; 2274 ctx_info[i].start = mem_desc.base; 2275 ctx_info[i].end = mem_desc.limit; 2276 mem_type[i] = j; 2277 break; 2278 } 2279 } 2280 if (!found) 2281 ctx_info[i].exist = false; 2282 } 2283 2284 /* Get FLM and CNM max qid. */ 2285 value = t4_read_reg(padap, SGE_FLM_CFG_A); 2286 2287 /* Get number of data freelist queues */ 2288 flq = HDRSTARTFLQ_G(value); 2289 ctx_info[CTXT_FLM].exist = true; 2290 ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE; 2291 2292 /* The number of CONM contexts are same as number of freelist 2293 * queues. 2294 */ 2295 ctx_info[CTXT_CNM].exist = true; 2296 ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end; 2297 2298 return 0; 2299 } 2300 2301 int cudbg_dump_context_size(struct adapter *padap) 2302 { 2303 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; 2304 u8 mem_type[CTXT_INGRESS + 1] = { 0 }; 2305 u32 i, size = 0; 2306 int rc; 2307 2308 /* Get max valid qid for each type of queue */ 2309 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); 2310 if (rc) 2311 return rc; 2312 2313 for (i = 0; i < CTXT_CNM; i++) { 2314 if (!region_info[i].exist) { 2315 if (i == CTXT_EGRESS || i == CTXT_INGRESS) 2316 size += CUDBG_LOWMEM_MAX_CTXT_QIDS * 2317 SGE_CTXT_SIZE; 2318 continue; 2319 } 2320 2321 size += (region_info[i].end - region_info[i].start + 1) / 2322 SGE_CTXT_SIZE; 2323 } 2324 return size * sizeof(struct cudbg_ch_cntxt); 2325 } 2326 2327 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, 2328 enum ctxt_type ctype, u32 *data) 2329 { 2330 struct adapter *padap = pdbg_init->adap; 2331 int rc = -1; 2332 2333 /* Under heavy traffic, the SGE Queue contexts registers will be 2334 * frequently accessed by firmware. 2335 * 2336 * To avoid conflicts with firmware, always ask firmware to fetch 2337 * the SGE Queue contexts via mailbox. On failure, fallback to 2338 * accessing hardware registers directly. 
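 * Note that rc is initialized to -1, so the register backdoor path
 * (t4_sge_ctxt_rd_bd()) is also taken when firmware is not attached.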
2339 */ 2340 if (is_fw_attached(pdbg_init)) 2341 rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data); 2342 if (rc) 2343 t4_sge_ctxt_rd_bd(padap, cid, ctype, data); 2344 } 2345 2346 static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid, 2347 u8 ctxt_type, 2348 struct cudbg_ch_cntxt **out_buff) 2349 { 2350 struct cudbg_ch_cntxt *buff = *out_buff; 2351 int rc; 2352 u32 j; 2353 2354 for (j = 0; j < max_qid; j++) { 2355 cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data); 2356 rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type); 2357 if (!rc) 2358 continue; 2359 2360 buff->cntxt_type = ctxt_type; 2361 buff->cntxt_id = j; 2362 buff++; 2363 if (ctxt_type == CTXT_FLM) { 2364 cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data); 2365 buff->cntxt_type = CTXT_CNM; 2366 buff->cntxt_id = j; 2367 buff++; 2368 } 2369 } 2370 2371 *out_buff = buff; 2372 } 2373 2374 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, 2375 struct cudbg_buffer *dbg_buff, 2376 struct cudbg_error *cudbg_err) 2377 { 2378 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; 2379 struct adapter *padap = pdbg_init->adap; 2380 u32 j, size, max_ctx_size, max_ctx_qid; 2381 u8 mem_type[CTXT_INGRESS + 1] = { 0 }; 2382 struct cudbg_buffer temp_buff = { 0 }; 2383 struct cudbg_ch_cntxt *buff; 2384 u8 *ctx_buf; 2385 u8 i, k; 2386 int rc; 2387 2388 /* Get max valid qid for each type of queue */ 2389 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); 2390 if (rc) 2391 return rc; 2392 2393 rc = cudbg_dump_context_size(padap); 2394 if (rc <= 0) 2395 return CUDBG_STATUS_ENTITY_NOT_FOUND; 2396 2397 size = rc; 2398 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2399 if (rc) 2400 return rc; 2401 2402 /* Get buffer with enough space to read the biggest context 2403 * region in memory. 2404 */ 2405 max_ctx_size = max(region_info[CTXT_EGRESS].end - 2406 region_info[CTXT_EGRESS].start + 1, 2407 region_info[CTXT_INGRESS].end - 2408 region_info[CTXT_INGRESS].start + 1); 2409 2410 ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL); 2411 if (!ctx_buf) { 2412 cudbg_put_buff(pdbg_init, &temp_buff); 2413 return -ENOMEM; 2414 } 2415 2416 buff = (struct cudbg_ch_cntxt *)temp_buff.data; 2417 2418 /* Collect EGRESS and INGRESS context data. 2419 * In case of failures, fallback to collecting via FW or 2420 * backdoor access. 2421 */ 2422 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { 2423 if (!region_info[i].exist) { 2424 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; 2425 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, 2426 &buff); 2427 continue; 2428 } 2429 2430 max_ctx_size = region_info[i].end - region_info[i].start + 1; 2431 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; 2432 2433 /* If firmware is not attached/alive, use backdoor register 2434 * access to collect dump. 2435 */ 2436 if (is_fw_attached(pdbg_init)) { 2437 t4_sge_ctxt_flush(padap, padap->mbox, i); 2438 2439 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i], 2440 region_info[i].start, max_ctx_size, 2441 (__be32 *)ctx_buf, 1); 2442 } 2443 2444 if (rc || !is_fw_attached(pdbg_init)) { 2445 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; 2446 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, 2447 &buff); 2448 continue; 2449 } 2450 2451 for (j = 0; j < max_ctx_qid; j++) { 2452 __be64 *dst_off; 2453 u64 *src_off; 2454 2455 src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); 2456 dst_off = (__be64 *)buff->data; 2457 2458 /* The data is stored in 64-bit cpu order. Convert it 2459 * to big endian before parsing. 
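 * Each context occupies SGE_CTXT_SIZE bytes and is converted one
 * 64-bit word at a time before the validity check below.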
2460 */ 2461 for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++) 2462 dst_off[k] = cpu_to_be64(src_off[k]); 2463 2464 rc = cudbg_sge_ctxt_check_valid(buff->data, i); 2465 if (!rc) 2466 continue; 2467 2468 buff->cntxt_type = i; 2469 buff->cntxt_id = j; 2470 buff++; 2471 } 2472 } 2473 2474 kvfree(ctx_buf); 2475 2476 /* Collect FREELIST and CONGESTION MANAGER contexts */ 2477 max_ctx_size = region_info[CTXT_FLM].end - 2478 region_info[CTXT_FLM].start + 1; 2479 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; 2480 /* Since FLM and CONM are 1-to-1 mapped, the below function 2481 * will fetch both FLM and CONM contexts. 2482 */ 2483 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff); 2484 2485 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2486 } 2487 2488 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) 2489 { 2490 *mask = x | y; 2491 y = (__force u64)cpu_to_be64(y); 2492 memcpy(addr, (char *)&y + 2, ETH_ALEN); 2493 } 2494 2495 static void cudbg_mps_rpl_backdoor(struct adapter *padap, 2496 struct fw_ldst_mps_rplc *mps_rplc) 2497 { 2498 if (is_t5(padap->params.chip)) { 2499 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, 2500 MPS_VF_RPLCT_MAP3_A)); 2501 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, 2502 MPS_VF_RPLCT_MAP2_A)); 2503 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, 2504 MPS_VF_RPLCT_MAP1_A)); 2505 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, 2506 MPS_VF_RPLCT_MAP0_A)); 2507 } else { 2508 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, 2509 MPS_VF_RPLCT_MAP7_A)); 2510 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, 2511 MPS_VF_RPLCT_MAP6_A)); 2512 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, 2513 MPS_VF_RPLCT_MAP5_A)); 2514 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, 2515 MPS_VF_RPLCT_MAP4_A)); 2516 } 2517 mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A)); 2518 mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A)); 2519 mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A)); 2520 mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A)); 2521 } 2522 2523 static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init, 2524 struct cudbg_mps_tcam *tcam, u32 idx) 2525 { 2526 struct adapter *padap = pdbg_init->adap; 2527 u64 tcamy, tcamx, val; 2528 u32 ctl, data2; 2529 int rc = 0; 2530 2531 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) { 2532 /* CtlReqID - 1: use Host Driver Requester ID 2533 * CtlCmdType - 0: Read, 1: Write 2534 * CtlTcamSel - 0: TCAM0, 1: TCAM1 2535 * CtlXYBitSel- 0: Y bit, 1: X bit 2536 */ 2537 2538 /* Read tcamy */ 2539 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); 2540 if (idx < 256) 2541 ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); 2542 else 2543 ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1); 2544 2545 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); 2546 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); 2547 tcamy = DMACH_G(val) << 32; 2548 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); 2549 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); 2550 tcam->lookup_type = DATALKPTYPE_G(data2); 2551 2552 /* 0 - Outer header, 1 - Inner header 2553 * [71:48] bit locations are overloaded for 2554 * outer vs. inner lookup types. 
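 * For inner lookups these bits carry the VNI and the DIP hit flag;
 * for outer lookups they carry the VLAN valid bit and VLAN ID.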
2555 */ 2556 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { 2557 /* Inner header VNI */ 2558 tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); 2559 tcam->vniy = (tcam->vniy << 16) | VIDL_G(val); 2560 tcam->dip_hit = data2 & DATADIPHIT_F; 2561 } else { 2562 tcam->vlan_vld = data2 & DATAVIDH2_F; 2563 tcam->ivlan = VIDL_G(val); 2564 } 2565 2566 tcam->port_num = DATAPORTNUM_G(data2); 2567 2568 /* Read tcamx. Change the control param */ 2569 ctl |= CTLXYBITSEL_V(1); 2570 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); 2571 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); 2572 tcamx = DMACH_G(val) << 32; 2573 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); 2574 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); 2575 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { 2576 /* Inner header VNI mask */ 2577 tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); 2578 tcam->vnix = (tcam->vnix << 16) | VIDL_G(val); 2579 } 2580 } else { 2581 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx)); 2582 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx)); 2583 } 2584 2585 /* If no entry, return */ 2586 if (tcamx & tcamy) 2587 return rc; 2588 2589 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx)); 2590 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx)); 2591 2592 if (is_t5(padap->params.chip)) 2593 tcam->repli = (tcam->cls_lo & REPLICATE_F); 2594 else if (is_t6(padap->params.chip)) 2595 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F); 2596 2597 if (tcam->repli) { 2598 struct fw_ldst_cmd ldst_cmd; 2599 struct fw_ldst_mps_rplc mps_rplc; 2600 2601 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 2602 ldst_cmd.op_to_addrspace = 2603 htonl(FW_CMD_OP_V(FW_LDST_CMD) | 2604 FW_CMD_REQUEST_F | FW_CMD_READ_F | 2605 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS)); 2606 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); 2607 ldst_cmd.u.mps.rplc.fid_idx = 2608 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | 2609 FW_LDST_CMD_IDX_V(idx)); 2610 2611 /* If firmware is not attached/alive, use backdoor register 2612 * access to collect dump. 2613 */ 2614 if (is_fw_attached(pdbg_init)) 2615 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, 2616 sizeof(ldst_cmd), &ldst_cmd); 2617 2618 if (rc || !is_fw_attached(pdbg_init)) { 2619 cudbg_mps_rpl_backdoor(padap, &mps_rplc); 2620 /* Ignore error since we collected directly from 2621 * reading registers. 
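 * rc is cleared so the entry is still emitted using the replication
 * map read back through cudbg_mps_rpl_backdoor() above.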
2622 */ 2623 rc = 0; 2624 } else { 2625 mps_rplc = ldst_cmd.u.mps.rplc; 2626 } 2627 2628 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0); 2629 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32); 2630 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64); 2631 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96); 2632 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) { 2633 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128); 2634 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160); 2635 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192); 2636 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224); 2637 } 2638 } 2639 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask); 2640 tcam->idx = idx; 2641 tcam->rplc_size = padap->params.arch.mps_rplc_size; 2642 return rc; 2643 } 2644 2645 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, 2646 struct cudbg_buffer *dbg_buff, 2647 struct cudbg_error *cudbg_err) 2648 { 2649 struct adapter *padap = pdbg_init->adap; 2650 struct cudbg_buffer temp_buff = { 0 }; 2651 u32 size = 0, i, n, total_size = 0; 2652 struct cudbg_mps_tcam *tcam; 2653 int rc; 2654 2655 n = padap->params.arch.mps_tcam_size; 2656 size = sizeof(struct cudbg_mps_tcam) * n; 2657 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2658 if (rc) 2659 return rc; 2660 2661 tcam = (struct cudbg_mps_tcam *)temp_buff.data; 2662 for (i = 0; i < n; i++) { 2663 rc = cudbg_collect_tcam_index(pdbg_init, tcam, i); 2664 if (rc) { 2665 cudbg_err->sys_err = rc; 2666 cudbg_put_buff(pdbg_init, &temp_buff); 2667 return rc; 2668 } 2669 total_size += sizeof(struct cudbg_mps_tcam); 2670 tcam++; 2671 } 2672 2673 if (!total_size) { 2674 rc = CUDBG_SYSTEM_ERROR; 2675 cudbg_err->sys_err = rc; 2676 cudbg_put_buff(pdbg_init, &temp_buff); 2677 return rc; 2678 } 2679 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2680 } 2681 2682 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, 2683 struct cudbg_buffer *dbg_buff, 2684 struct cudbg_error *cudbg_err) 2685 { 2686 struct adapter *padap = pdbg_init->adap; 2687 struct cudbg_buffer temp_buff = { 0 }; 2688 char vpd_str[CUDBG_VPD_VER_LEN + 1]; 2689 u32 scfg_vers, vpd_vers, fw_vers; 2690 struct cudbg_vpd_data *vpd_data; 2691 struct vpd_params vpd = { 0 }; 2692 int rc, ret; 2693 2694 rc = t4_get_raw_vpd_params(padap, &vpd); 2695 if (rc) 2696 return rc; 2697 2698 rc = t4_get_fw_version(padap, &fw_vers); 2699 if (rc) 2700 return rc; 2701 2702 /* Serial Configuration Version is located beyond the PF's vpd size. 2703 * Temporarily give access to entire EEPROM to get it. 
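 * The read status is kept in 'ret' so that the original VPD window
 * can be restored before the result of the read is checked.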
2704 */ 2705 rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE); 2706 if (rc < 0) 2707 return rc; 2708 2709 ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN, 2710 &scfg_vers); 2711 2712 /* Restore back to original PF's vpd size */ 2713 rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE); 2714 if (rc < 0) 2715 return rc; 2716 2717 if (ret) 2718 return ret; 2719 2720 rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN, 2721 vpd_str); 2722 if (rc) 2723 return rc; 2724 2725 vpd_str[CUDBG_VPD_VER_LEN] = '\0'; 2726 rc = kstrtouint(vpd_str, 0, &vpd_vers); 2727 if (rc) 2728 return rc; 2729 2730 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data), 2731 &temp_buff); 2732 if (rc) 2733 return rc; 2734 2735 vpd_data = (struct cudbg_vpd_data *)temp_buff.data; 2736 memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1); 2737 memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1); 2738 memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1); 2739 memcpy(vpd_data->mn, vpd.id, ID_LEN + 1); 2740 vpd_data->scfg_vers = scfg_vers; 2741 vpd_data->vpd_vers = vpd_vers; 2742 vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers); 2743 vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); 2744 vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); 2745 vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); 2746 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2747 } 2748 2749 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, 2750 struct cudbg_tid_data *tid_data) 2751 { 2752 struct adapter *padap = pdbg_init->adap; 2753 int i, cmd_retry = 8; 2754 u32 val; 2755 2756 /* Fill REQ_DATA regs with 0's */ 2757 for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++) 2758 t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0); 2759 2760 /* Write DBIG command */ 2761 val = DBGICMD_V(4) | DBGITID_V(tid); 2762 t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val); 2763 tid_data->dbig_cmd = val; 2764 2765 val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */ 2766 t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val); 2767 tid_data->dbig_conf = val; 2768 2769 /* Poll the DBGICMDBUSY bit */ 2770 val = 1; 2771 while (val) { 2772 val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A); 2773 val = val & DBGICMDBUSY_F; 2774 cmd_retry--; 2775 if (!cmd_retry) 2776 return CUDBG_SYSTEM_ERROR; 2777 } 2778 2779 /* Check RESP status */ 2780 val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A); 2781 tid_data->dbig_rsp_stat = val; 2782 if (!(val & 1)) 2783 return CUDBG_SYSTEM_ERROR; 2784 2785 /* Read RESP data */ 2786 for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++) 2787 tid_data->data[i] = t4_read_reg(padap, 2788 LE_DB_DBGI_RSP_DATA_A + 2789 (i << 2)); 2790 tid_data->tid = tid; 2791 return 0; 2792 } 2793 2794 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region) 2795 { 2796 int type = LE_ET_UNKNOWN; 2797 2798 if (tid < tcam_region.server_start) 2799 type = LE_ET_TCAM_CON; 2800 else if (tid < tcam_region.filter_start) 2801 type = LE_ET_TCAM_SERVER; 2802 else if (tid < tcam_region.clip_start) 2803 type = LE_ET_TCAM_FILTER; 2804 else if (tid < tcam_region.routing_start) 2805 type = LE_ET_TCAM_CLIP; 2806 else if (tid < tcam_region.tid_hash_base) 2807 type = LE_ET_TCAM_ROUTING; 2808 else if (tid < tcam_region.max_tid) 2809 type = LE_ET_HASH_CON; 2810 else 2811 type = LE_ET_INVALID_TID; 2812 2813 return type; 2814 } 2815 2816 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data, 2817 struct cudbg_tcam tcam_region) 2818 { 2819 int ipv6 = 0; 2820 int le_type; 2821 2822 le_type = 
cudbg_get_le_type(tid_data->tid, tcam_region); 2823 if (tid_data->tid & 1) 2824 return 0; 2825 2826 if (le_type == LE_ET_HASH_CON) { 2827 ipv6 = tid_data->data[16] & 0x8000; 2828 } else if (le_type == LE_ET_TCAM_CON) { 2829 ipv6 = tid_data->data[16] & 0x8000; 2830 if (ipv6) 2831 ipv6 = tid_data->data[9] == 0x00C00000; 2832 } else { 2833 ipv6 = 0; 2834 } 2835 return ipv6; 2836 } 2837 2838 void cudbg_fill_le_tcam_info(struct adapter *padap, 2839 struct cudbg_tcam *tcam_region) 2840 { 2841 u32 value; 2842 2843 /* Get the LE regions */ 2844 value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */ 2845 tcam_region->tid_hash_base = value; 2846 2847 /* Get routing table index */ 2848 value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); 2849 tcam_region->routing_start = value; 2850 2851 /* Get clip table index. For T6 there is separate CLIP TCAM */ 2852 if (is_t6(padap->params.chip)) 2853 value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A); 2854 else 2855 value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); 2856 tcam_region->clip_start = value; 2857 2858 /* Get filter table index */ 2859 value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A); 2860 tcam_region->filter_start = value; 2861 2862 /* Get server table index */ 2863 value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A); 2864 tcam_region->server_start = value; 2865 2866 /* Check whether hash is enabled and calculate the max tids */ 2867 value = t4_read_reg(padap, LE_DB_CONFIG_A); 2868 if ((value >> HASHEN_S) & 1) { 2869 value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A); 2870 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { 2871 tcam_region->max_tid = (value & 0xFFFFF) + 2872 tcam_region->tid_hash_base; 2873 } else { 2874 value = HASHTIDSIZE_G(value); 2875 value = 1 << value; 2876 tcam_region->max_tid = value + 2877 tcam_region->tid_hash_base; 2878 } 2879 } else { /* hash not enabled */ 2880 if (is_t6(padap->params.chip)) 2881 tcam_region->max_tid = (value & ASLIPCOMPEN_F) ? 
2882 CUDBG_MAX_TID_COMP_EN : 2883 CUDBG_MAX_TID_COMP_DIS; 2884 else 2885 tcam_region->max_tid = CUDBG_MAX_TCAM_TID; 2886 } 2887 2888 if (is_t6(padap->params.chip)) 2889 tcam_region->max_tid += CUDBG_T6_CLIP; 2890 } 2891 2892 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, 2893 struct cudbg_buffer *dbg_buff, 2894 struct cudbg_error *cudbg_err) 2895 { 2896 struct adapter *padap = pdbg_init->adap; 2897 struct cudbg_buffer temp_buff = { 0 }; 2898 struct cudbg_tcam tcam_region = { 0 }; 2899 struct cudbg_tid_data *tid_data; 2900 u32 bytes = 0; 2901 int rc, size; 2902 u32 i; 2903 2904 cudbg_fill_le_tcam_info(padap, &tcam_region); 2905 2906 size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; 2907 size += sizeof(struct cudbg_tcam); 2908 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2909 if (rc) 2910 return rc; 2911 2912 memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); 2913 bytes = sizeof(struct cudbg_tcam); 2914 tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes); 2915 /* read all tid */ 2916 for (i = 0; i < tcam_region.max_tid; ) { 2917 rc = cudbg_read_tid(pdbg_init, i, tid_data); 2918 if (rc) { 2919 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 2920 /* Update tcam header and exit */ 2921 tcam_region.max_tid = i; 2922 memcpy(temp_buff.data, &tcam_region, 2923 sizeof(struct cudbg_tcam)); 2924 goto out; 2925 } 2926 2927 if (cudbg_is_ipv6_entry(tid_data, tcam_region)) { 2928 /* T6 CLIP TCAM: ipv6 takes 4 entries */ 2929 if (is_t6(padap->params.chip) && 2930 i >= tcam_region.clip_start && 2931 i < tcam_region.clip_start + CUDBG_T6_CLIP) 2932 i += 4; 2933 else /* Main TCAM: ipv6 takes two tids */ 2934 i += 2; 2935 } else { 2936 i++; 2937 } 2938 2939 tid_data++; 2940 bytes += sizeof(struct cudbg_tid_data); 2941 } 2942 2943 out: 2944 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2945 } 2946 2947 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, 2948 struct cudbg_buffer *dbg_buff, 2949 struct cudbg_error *cudbg_err) 2950 { 2951 struct adapter *padap = pdbg_init->adap; 2952 struct cudbg_buffer temp_buff = { 0 }; 2953 u32 size; 2954 int rc; 2955 2956 size = sizeof(u16) * NMTUS * NCCTRL_WIN; 2957 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2958 if (rc) 2959 return rc; 2960 2961 t4_read_cong_tbl(padap, (void *)temp_buff.data); 2962 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 2963 } 2964 2965 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, 2966 struct cudbg_buffer *dbg_buff, 2967 struct cudbg_error *cudbg_err) 2968 { 2969 struct adapter *padap = pdbg_init->adap; 2970 struct cudbg_buffer temp_buff = { 0 }; 2971 struct ireg_buf *ma_indr; 2972 int i, rc, n; 2973 u32 size, j; 2974 2975 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) 2976 return CUDBG_STATUS_ENTITY_NOT_FOUND; 2977 2978 n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); 2979 size = sizeof(struct ireg_buf) * n * 2; 2980 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 2981 if (rc) 2982 return rc; 2983 2984 ma_indr = (struct ireg_buf *)temp_buff.data; 2985 for (i = 0; i < n; i++) { 2986 struct ireg_field *ma_fli = &ma_indr->tp_pio; 2987 u32 *buff = ma_indr->outbuf; 2988 2989 ma_fli->ireg_addr = t6_ma_ireg_array[i][0]; 2990 ma_fli->ireg_data = t6_ma_ireg_array[i][1]; 2991 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2]; 2992 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3]; 2993 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data, 2994 buff, ma_fli->ireg_offset_range, 
2995 ma_fli->ireg_local_offset); 2996 ma_indr++; 2997 } 2998 2999 n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32)); 3000 for (i = 0; i < n; i++) { 3001 struct ireg_field *ma_fli = &ma_indr->tp_pio; 3002 u32 *buff = ma_indr->outbuf; 3003 3004 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0]; 3005 ma_fli->ireg_data = t6_ma_ireg_array2[i][1]; 3006 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2]; 3007 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) { 3008 t4_read_indirect(padap, ma_fli->ireg_addr, 3009 ma_fli->ireg_data, buff, 1, 3010 ma_fli->ireg_local_offset); 3011 buff++; 3012 ma_fli->ireg_local_offset += 0x20; 3013 } 3014 ma_indr++; 3015 } 3016 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3017 } 3018 3019 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, 3020 struct cudbg_buffer *dbg_buff, 3021 struct cudbg_error *cudbg_err) 3022 { 3023 struct adapter *padap = pdbg_init->adap; 3024 struct cudbg_buffer temp_buff = { 0 }; 3025 struct cudbg_ulptx_la *ulptx_la_buff; 3026 struct cudbg_ver_hdr *ver_hdr; 3027 u32 i, j; 3028 int rc; 3029 3030 rc = cudbg_get_buff(pdbg_init, dbg_buff, 3031 sizeof(struct cudbg_ver_hdr) + 3032 sizeof(struct cudbg_ulptx_la), 3033 &temp_buff); 3034 if (rc) 3035 return rc; 3036 3037 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; 3038 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 3039 ver_hdr->revision = CUDBG_ULPTX_LA_REV; 3040 ver_hdr->size = sizeof(struct cudbg_ulptx_la); 3041 3042 ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + 3043 sizeof(*ver_hdr)); 3044 for (i = 0; i < CUDBG_NUM_ULPTX; i++) { 3045 ulptx_la_buff->rdptr[i] = t4_read_reg(padap, 3046 ULP_TX_LA_RDPTR_0_A + 3047 0x10 * i); 3048 ulptx_la_buff->wrptr[i] = t4_read_reg(padap, 3049 ULP_TX_LA_WRPTR_0_A + 3050 0x10 * i); 3051 ulptx_la_buff->rddata[i] = t4_read_reg(padap, 3052 ULP_TX_LA_RDDATA_0_A + 3053 0x10 * i); 3054 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) 3055 ulptx_la_buff->rd_data[i][j] = 3056 t4_read_reg(padap, 3057 ULP_TX_LA_RDDATA_0_A + 0x10 * i); 3058 } 3059 3060 for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { 3061 t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1); 3062 ulptx_la_buff->rdptr_asic[i] = 3063 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A); 3064 ulptx_la_buff->rddata_asic[i][0] = 3065 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A); 3066 ulptx_la_buff->rddata_asic[i][1] = 3067 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A); 3068 ulptx_la_buff->rddata_asic[i][2] = 3069 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A); 3070 ulptx_la_buff->rddata_asic[i][3] = 3071 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A); 3072 ulptx_la_buff->rddata_asic[i][4] = 3073 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A); 3074 ulptx_la_buff->rddata_asic[i][5] = 3075 t4_read_reg(padap, PM_RX_BASE_ADDR); 3076 } 3077 3078 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3079 } 3080 3081 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, 3082 struct cudbg_buffer *dbg_buff, 3083 struct cudbg_error *cudbg_err) 3084 { 3085 struct adapter *padap = pdbg_init->adap; 3086 struct cudbg_buffer temp_buff = { 0 }; 3087 u32 local_offset, local_range; 3088 struct ireg_buf *up_cim; 3089 u32 size, j, iter; 3090 u32 instance = 0; 3091 int i, rc, n; 3092 3093 if (is_t5(padap->params.chip)) 3094 n = sizeof(t5_up_cim_reg_array) / 3095 ((IREG_NUM_ELEM + 1) * sizeof(u32)); 3096 else if (is_t6(padap->params.chip)) 3097 n = sizeof(t6_up_cim_reg_array) / 3098 ((IREG_NUM_ELEM + 1) * sizeof(u32)); 3099 else 3100 return CUDBG_STATUS_NOT_IMPLEMENTED; 3101 3102 size = sizeof(struct 
ireg_buf) * n; 3103 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3104 if (rc) 3105 return rc; 3106 3107 up_cim = (struct ireg_buf *)temp_buff.data; 3108 for (i = 0; i < n; i++) { 3109 struct ireg_field *up_cim_reg = &up_cim->tp_pio; 3110 u32 *buff = up_cim->outbuf; 3111 3112 if (is_t5(padap->params.chip)) { 3113 up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0]; 3114 up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1]; 3115 up_cim_reg->ireg_local_offset = 3116 t5_up_cim_reg_array[i][2]; 3117 up_cim_reg->ireg_offset_range = 3118 t5_up_cim_reg_array[i][3]; 3119 instance = t5_up_cim_reg_array[i][4]; 3120 } else if (is_t6(padap->params.chip)) { 3121 up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; 3122 up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; 3123 up_cim_reg->ireg_local_offset = 3124 t6_up_cim_reg_array[i][2]; 3125 up_cim_reg->ireg_offset_range = 3126 t6_up_cim_reg_array[i][3]; 3127 instance = t6_up_cim_reg_array[i][4]; 3128 } 3129 3130 switch (instance) { 3131 case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: 3132 iter = up_cim_reg->ireg_offset_range; 3133 local_offset = 0x120; 3134 local_range = 1; 3135 break; 3136 case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: 3137 iter = up_cim_reg->ireg_offset_range; 3138 local_offset = 0x10; 3139 local_range = 1; 3140 break; 3141 default: 3142 iter = 1; 3143 local_offset = 0; 3144 local_range = up_cim_reg->ireg_offset_range; 3145 break; 3146 } 3147 3148 for (j = 0; j < iter; j++, buff++) { 3149 rc = t4_cim_read(padap, 3150 up_cim_reg->ireg_local_offset + 3151 (j * local_offset), local_range, buff); 3152 if (rc) { 3153 cudbg_put_buff(pdbg_init, &temp_buff); 3154 return rc; 3155 } 3156 } 3157 up_cim++; 3158 } 3159 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3160 } 3161 3162 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, 3163 struct cudbg_buffer *dbg_buff, 3164 struct cudbg_error *cudbg_err) 3165 { 3166 struct adapter *padap = pdbg_init->adap; 3167 struct cudbg_buffer temp_buff = { 0 }; 3168 struct cudbg_pbt_tables *pbt; 3169 int i, rc; 3170 u32 addr; 3171 3172 rc = cudbg_get_buff(pdbg_init, dbg_buff, 3173 sizeof(struct cudbg_pbt_tables), 3174 &temp_buff); 3175 if (rc) 3176 return rc; 3177 3178 pbt = (struct cudbg_pbt_tables *)temp_buff.data; 3179 /* PBT dynamic entries */ 3180 addr = CUDBG_CHAC_PBT_ADDR; 3181 for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) { 3182 rc = t4_cim_read(padap, addr + (i * 4), 1, 3183 &pbt->pbt_dynamic[i]); 3184 if (rc) { 3185 cudbg_err->sys_err = rc; 3186 cudbg_put_buff(pdbg_init, &temp_buff); 3187 return rc; 3188 } 3189 } 3190 3191 /* PBT static entries */ 3192 /* static entries start when bit 6 is set */ 3193 addr = CUDBG_CHAC_PBT_ADDR + (1 << 6); 3194 for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) { 3195 rc = t4_cim_read(padap, addr + (i * 4), 1, 3196 &pbt->pbt_static[i]); 3197 if (rc) { 3198 cudbg_err->sys_err = rc; 3199 cudbg_put_buff(pdbg_init, &temp_buff); 3200 return rc; 3201 } 3202 } 3203 3204 /* LRF entries */ 3205 addr = CUDBG_CHAC_PBT_LRF; 3206 for (i = 0; i < CUDBG_LRF_ENTRIES; i++) { 3207 rc = t4_cim_read(padap, addr + (i * 4), 1, 3208 &pbt->lrf_table[i]); 3209 if (rc) { 3210 cudbg_err->sys_err = rc; 3211 cudbg_put_buff(pdbg_init, &temp_buff); 3212 return rc; 3213 } 3214 } 3215 3216 /* PBT data entries */ 3217 addr = CUDBG_CHAC_PBT_DATA; 3218 for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) { 3219 rc = t4_cim_read(padap, addr + (i * 4), 1, 3220 &pbt->pbt_data[i]); 3221 if (rc) { 3222 cudbg_err->sys_err = rc; 3223 cudbg_put_buff(pdbg_init, &temp_buff); 3224 
return rc; 3225 } 3226 } 3227 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3228 } 3229 3230 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, 3231 struct cudbg_buffer *dbg_buff, 3232 struct cudbg_error *cudbg_err) 3233 { 3234 struct adapter *padap = pdbg_init->adap; 3235 struct cudbg_mbox_log *mboxlog = NULL; 3236 struct cudbg_buffer temp_buff = { 0 }; 3237 struct mbox_cmd_log *log = NULL; 3238 struct mbox_cmd *entry; 3239 unsigned int entry_idx; 3240 u16 mbox_cmds; 3241 int i, k, rc; 3242 u64 flit; 3243 u32 size; 3244 3245 log = padap->mbox_log; 3246 mbox_cmds = padap->mbox_log->size; 3247 size = sizeof(struct cudbg_mbox_log) * mbox_cmds; 3248 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3249 if (rc) 3250 return rc; 3251 3252 mboxlog = (struct cudbg_mbox_log *)temp_buff.data; 3253 for (k = 0; k < mbox_cmds; k++) { 3254 entry_idx = log->cursor + k; 3255 if (entry_idx >= log->size) 3256 entry_idx -= log->size; 3257 3258 entry = mbox_cmd_log_entry(log, entry_idx); 3259 /* skip over unused entries */ 3260 if (entry->timestamp == 0) 3261 continue; 3262 3263 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd)); 3264 for (i = 0; i < MBOX_LEN / 8; i++) { 3265 flit = entry->cmd[i]; 3266 mboxlog->hi[i] = (u32)(flit >> 32); 3267 mboxlog->lo[i] = (u32)flit; 3268 } 3269 mboxlog++; 3270 } 3271 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3272 } 3273 3274 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, 3275 struct cudbg_buffer *dbg_buff, 3276 struct cudbg_error *cudbg_err) 3277 { 3278 struct adapter *padap = pdbg_init->adap; 3279 struct cudbg_buffer temp_buff = { 0 }; 3280 struct ireg_buf *hma_indr; 3281 int i, rc, n; 3282 u32 size; 3283 3284 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) 3285 return CUDBG_STATUS_ENTITY_NOT_FOUND; 3286 3287 n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); 3288 size = sizeof(struct ireg_buf) * n; 3289 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); 3290 if (rc) 3291 return rc; 3292 3293 hma_indr = (struct ireg_buf *)temp_buff.data; 3294 for (i = 0; i < n; i++) { 3295 struct ireg_field *hma_fli = &hma_indr->tp_pio; 3296 u32 *buff = hma_indr->outbuf; 3297 3298 hma_fli->ireg_addr = t6_hma_ireg_array[i][0]; 3299 hma_fli->ireg_data = t6_hma_ireg_array[i][1]; 3300 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2]; 3301 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3]; 3302 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data, 3303 buff, hma_fli->ireg_offset_range, 3304 hma_fli->ireg_local_offset); 3305 hma_indr++; 3306 } 3307 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); 3308 } 3309 3310 void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, 3311 u32 *num, u32 *size) 3312 { 3313 u32 tot_entries = 0, tot_size = 0; 3314 3315 /* NIC TXQ, RXQ, FLQ, and CTRLQ */ 3316 tot_entries += MAX_ETH_QSETS * 3; 3317 tot_entries += MAX_CTRL_QUEUES; 3318 3319 tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3320 tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; 3321 tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; 3322 tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * 3323 MAX_CTRL_TXQ_DESC_SIZE; 3324 3325 /* FW_EVTQ and INTRQ */ 3326 tot_entries += INGQ_EXTRAS; 3327 tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; 3328 3329 /* PTP_TXQ */ 3330 tot_entries += 1; 3331 tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3332 3333 /* ULD TXQ, RXQ, and FLQ */ 3334 
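	/* Each ULD RX queue set carries a response queue and a free list,
	 * hence the factor of two in the RXQ/FLQ entry count below.
	 */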
tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; 3335 tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; 3336 3337 tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * 3338 MAX_TXQ_DESC_SIZE; 3339 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * 3340 MAX_RXQ_DESC_SIZE; 3341 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * 3342 MAX_FL_DESC_SIZE; 3343 3344 /* ULD CIQ */ 3345 tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; 3346 tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * 3347 MAX_RXQ_DESC_SIZE; 3348 3349 /* ETHOFLD TXQ, RXQ, and FLQ */ 3350 tot_entries += MAX_OFLD_QSETS * 3; 3351 tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; 3352 3353 tot_size += sizeof(struct cudbg_ver_hdr) + 3354 sizeof(struct cudbg_qdesc_info) + 3355 sizeof(struct cudbg_qdesc_entry) * tot_entries; 3356 3357 if (num) 3358 *num = tot_entries; 3359 3360 if (size) 3361 *size = tot_size; 3362 } 3363 3364 int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, 3365 struct cudbg_buffer *dbg_buff, 3366 struct cudbg_error *cudbg_err) 3367 { 3368 u32 num_queues = 0, tot_entries = 0, size = 0; 3369 struct adapter *padap = pdbg_init->adap; 3370 struct cudbg_buffer temp_buff = { 0 }; 3371 struct cudbg_qdesc_entry *qdesc_entry; 3372 struct cudbg_qdesc_info *qdesc_info; 3373 struct cudbg_ver_hdr *ver_hdr; 3374 struct sge *s = &padap->sge; 3375 u32 i, j, cur_off, tot_len; 3376 u8 *data; 3377 int rc; 3378 3379 cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size); 3380 size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); 3381 tot_len = size; 3382 data = kvzalloc(size, GFP_KERNEL); 3383 if (!data) 3384 return -ENOMEM; 3385 3386 ver_hdr = (struct cudbg_ver_hdr *)data; 3387 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; 3388 ver_hdr->revision = CUDBG_QDESC_REV; 3389 ver_hdr->size = sizeof(struct cudbg_qdesc_info); 3390 size -= sizeof(*ver_hdr); 3391 3392 qdesc_info = (struct cudbg_qdesc_info *)(data + 3393 sizeof(*ver_hdr)); 3394 size -= sizeof(*qdesc_info); 3395 qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; 3396 3397 #define QDESC_GET(q, desc, type, label) do { \ 3398 if (size <= 0) { \ 3399 goto label; \ 3400 } \ 3401 if (desc) { \ 3402 cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ 3403 size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ 3404 num_queues++; \ 3405 qdesc_entry = cudbg_next_qdesc(qdesc_entry); \ 3406 } \ 3407 } while (0) 3408 3409 #define QDESC_GET_TXQ(q, type, label) do { \ 3410 struct sge_txq *txq = (struct sge_txq *)q; \ 3411 QDESC_GET(txq, txq->desc, type, label); \ 3412 } while (0) 3413 3414 #define QDESC_GET_RXQ(q, type, label) do { \ 3415 struct sge_rspq *rxq = (struct sge_rspq *)q; \ 3416 QDESC_GET(rxq, rxq->desc, type, label); \ 3417 } while (0) 3418 3419 #define QDESC_GET_FLQ(q, type, label) do { \ 3420 struct sge_fl *flq = (struct sge_fl *)q; \ 3421 QDESC_GET(flq, flq->desc, type, label); \ 3422 } while (0) 3423 3424 /* NIC TXQ */ 3425 for (i = 0; i < s->ethqsets; i++) 3426 QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); 3427 3428 /* NIC RXQ */ 3429 for (i = 0; i < s->ethqsets; i++) 3430 QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); 3431 3432 /* NIC FLQ */ 3433 for (i = 0; i < s->ethqsets; i++) 3434 QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); 3435 3436 /* NIC CTRLQ */ 3437 for (i = 0; i < padap->params.nports; i++) 3438 QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); 3439 3440 /* FW_EVTQ */ 3441 QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); 3442 3443 /* INTRQ */ 3444 
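	/* FW_EVTQ above and this INTRQ are the INGQ_EXTRAS response queues
	 * accounted for in cudbg_fill_qdesc_num_and_size().
	 */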
QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); 3445 3446 /* PTP_TXQ */ 3447 QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); 3448 3449 /* ULD Queues */ 3450 mutex_lock(&uld_mutex); 3451 3452 if (s->uld_txq_info) { 3453 struct sge_uld_txq_info *utxq; 3454 3455 /* ULD TXQ */ 3456 for (j = 0; j < CXGB4_TX_MAX; j++) { 3457 if (!s->uld_txq_info[j]) 3458 continue; 3459 3460 utxq = s->uld_txq_info[j]; 3461 for (i = 0; i < utxq->ntxq; i++) 3462 QDESC_GET_TXQ(&utxq->uldtxq[i].q, 3463 cudbg_uld_txq_to_qtype(j), 3464 out_unlock); 3465 } 3466 } 3467 3468 if (s->uld_rxq_info) { 3469 struct sge_uld_rxq_info *urxq; 3470 u32 base; 3471 3472 /* ULD RXQ */ 3473 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3474 if (!s->uld_rxq_info[j]) 3475 continue; 3476 3477 urxq = s->uld_rxq_info[j]; 3478 for (i = 0; i < urxq->nrxq; i++) 3479 QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, 3480 cudbg_uld_rxq_to_qtype(j), 3481 out_unlock); 3482 } 3483 3484 /* ULD FLQ */ 3485 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3486 if (!s->uld_rxq_info[j]) 3487 continue; 3488 3489 urxq = s->uld_rxq_info[j]; 3490 for (i = 0; i < urxq->nrxq; i++) 3491 QDESC_GET_FLQ(&urxq->uldrxq[i].fl, 3492 cudbg_uld_flq_to_qtype(j), 3493 out_unlock); 3494 } 3495 3496 /* ULD CIQ */ 3497 for (j = 0; j < CXGB4_ULD_MAX; j++) { 3498 if (!s->uld_rxq_info[j]) 3499 continue; 3500 3501 urxq = s->uld_rxq_info[j]; 3502 base = urxq->nrxq; 3503 for (i = 0; i < urxq->nciq; i++) 3504 QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, 3505 cudbg_uld_ciq_to_qtype(j), 3506 out_unlock); 3507 } 3508 } 3509 3510 /* ETHOFLD TXQ */ 3511 if (s->eohw_txq) 3512 for (i = 0; i < s->eoqsets; i++) 3513 QDESC_GET_TXQ(&s->eohw_txq[i].q, 3514 CUDBG_QTYPE_ETHOFLD_TXQ, out); 3515 3516 /* ETHOFLD RXQ and FLQ */ 3517 if (s->eohw_rxq) { 3518 for (i = 0; i < s->eoqsets; i++) 3519 QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, 3520 CUDBG_QTYPE_ETHOFLD_RXQ, out); 3521 3522 for (i = 0; i < s->eoqsets; i++) 3523 QDESC_GET_FLQ(&s->eohw_rxq[i].fl, 3524 CUDBG_QTYPE_ETHOFLD_FLQ, out); 3525 } 3526 3527 out_unlock: 3528 mutex_unlock(&uld_mutex); 3529 3530 out: 3531 qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); 3532 qdesc_info->num_queues = num_queues; 3533 cur_off = 0; 3534 while (tot_len) { 3535 u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); 3536 3537 rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size, 3538 &temp_buff); 3539 if (rc) { 3540 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3541 goto out_free; 3542 } 3543 3544 memcpy(temp_buff.data, data + cur_off, chunk_size); 3545 tot_len -= chunk_size; 3546 cur_off += chunk_size; 3547 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, 3548 dbg_buff); 3549 if (rc) { 3550 cudbg_put_buff(pdbg_init, &temp_buff); 3551 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3552 goto out_free; 3553 } 3554 } 3555 3556 out_free: 3557 if (data) 3558 kvfree(data); 3559 3560 #undef QDESC_GET_FLQ 3561 #undef QDESC_GET_RXQ 3562 #undef QDESC_GET_TXQ 3563 #undef QDESC_GET 3564 3565 return rc; 3566 } 3567 3568 int cudbg_collect_flash(struct cudbg_init *pdbg_init, 3569 struct cudbg_buffer *dbg_buff, 3570 struct cudbg_error *cudbg_err) 3571 { 3572 struct adapter *padap = pdbg_init->adap; 3573 u32 count = padap->params.sf_size, n; 3574 struct cudbg_buffer temp_buff = {0}; 3575 u32 addr, i; 3576 int rc; 3577 3578 addr = FLASH_EXP_ROM_START; 3579 3580 for (i = 0; i < count; i += SF_PAGE_SIZE) { 3581 n = min_t(u32, count - i, SF_PAGE_SIZE); 3582 3583 rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff); 3584 if (rc) { 3585 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; 3586 goto 
out;
		}
		rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0);
		if (rc)
			goto out;

		addr += (n * 4);
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out;
		}
	}

out:
	return rc;
}