// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

struct qed_pi_info {
	qed_int_comp_cb_t comp_cb;
	void *cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		BIT(23)

#define ATTENTION_CLEAR_ENABLE		BIT(28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)

/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}

#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
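
/* The *_MASK/*_SHIFT pairs above follow the common HSI convention:
 * GET_FIELD(value, NAME) evaluates to ((value) >> NAME_SHIFT) & NAME_MASK.
 * E.g. with data = 0x4022, GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID)
 * is (0x4022 >> 14) & 0xf = 1, i.e. the access came from PF 1.
 */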
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}

#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)

static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}

static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register.
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ?
"VF" : "(Irrelevant)", 207 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 208 209 out: 210 /* Regardles of anything else, clean the validity bit */ 211 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 212 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 213 return 0; 214 } 215 216 #define PGLUE_ATTENTION_VALID (1 << 29) 217 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 218 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 219 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 220 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 221 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 222 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 223 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 224 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 225 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 226 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 227 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 228 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 229 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 230 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 231 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 232 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 233 234 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 235 struct qed_ptt *p_ptt) 236 { 237 u32 tmp; 238 239 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 240 if (tmp & PGLUE_ATTENTION_VALID) { 241 u32 addr_lo, addr_hi, details; 242 243 addr_lo = qed_rd(p_hwfn, p_ptt, 244 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 245 addr_hi = qed_rd(p_hwfn, p_ptt, 246 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 247 details = qed_rd(p_hwfn, p_ptt, 248 PGLUE_B_REG_TX_ERR_WR_DETAILS); 249 250 DP_NOTICE(p_hwfn, 251 "Illegal write by chip to [%08x:%08x] blocked.\n" 252 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 253 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 254 addr_hi, addr_lo, details, 255 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 256 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 257 GET_FIELD(details, 258 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 259 tmp, 260 GET_FIELD(tmp, 261 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 262 GET_FIELD(tmp, 263 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 264 GET_FIELD(tmp, 265 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 266 } 267 268 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 269 if (tmp & PGLUE_ATTENTION_RD_VALID) { 270 u32 addr_lo, addr_hi, details; 271 272 addr_lo = qed_rd(p_hwfn, p_ptt, 273 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 274 addr_hi = qed_rd(p_hwfn, p_ptt, 275 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 276 details = qed_rd(p_hwfn, p_ptt, 277 PGLUE_B_REG_TX_ERR_RD_DETAILS); 278 279 DP_NOTICE(p_hwfn, 280 "Illegal read by chip from [%08x:%08x] blocked.\n" 281 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 282 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 283 addr_hi, addr_lo, details, 284 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 285 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 286 GET_FIELD(details, 287 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 288 tmp, 289 GET_FIELD(tmp, 290 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 291 GET_FIELD(tmp, 292 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 293 GET_FIELD(tmp, 294 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}

static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}

static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
{
	qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
			  "FW assertion!\n");

	return -EINVAL;
}

static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return 0;
}

#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

#define QED_DB_REC_COUNT		1000
#define QED_DB_REC_INTERVAL		100
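
/* Worst-case wait in qed_db_rec_flush_queue() below:
 * QED_DB_REC_COUNT * QED_DB_REC_INTERVAL = 1000 * 100us = 100ms.
 */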
static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the PCI. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism or
	 * from the doorbelling entity) could have its first half dropped and
	 * its second half interpreted as a continuation of the first. To
	 * prevent such malformed doorbells from reaching the device, flush the
	 * queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}

int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}

static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}
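
/* Doorbell overflow handling is split in two: the attention-time path
 * (qed_dorq_attn_overflow() above) flushes the queue, releases the sticky
 * indication and latches QED_OVERFLOW_BIT, while the actual doorbell replay
 * is deferred to the periodic handler (qed_db_rec_handler()), which ends in
 * qed_db_recovery_execute().
 */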
static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* Check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
		    QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		/* Log info */
		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn,
		       p_ptt,
		       DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}

static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}

static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
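
/* On BB adapters (QED_IS_BB()) the AEU bits described below as NWS/NWM map
 * to these CNIG port sources instead; qed_int_aeu_translate() redirects any
 * descriptor flagged ATTENTION_BB_DIFFERENT to this table.
 */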
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};

/* Note: aeu_invert_reg must be defined in the same bit order as the HW */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
			 (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
			 MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
			 MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
ROM", ATTENTION_SINGLE, 734 NULL, BLOCK_PGLCS}, 735 {"PERST_B assertion", ATTENTION_SINGLE, 736 NULL, MAX_BLOCK_ID}, 737 {"PERST_B deassertion", ATTENTION_SINGLE, 738 NULL, MAX_BLOCK_ID}, 739 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 740 NULL, MAX_BLOCK_ID}, 741 } 742 }, 743 744 { 745 { /* After Invert 9 */ 746 {"MCP Latched memory", ATTENTION_PAR, 747 NULL, MAX_BLOCK_ID}, 748 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 749 NULL, MAX_BLOCK_ID}, 750 {"MCP Latched ump_tx", ATTENTION_PAR, 751 NULL, MAX_BLOCK_ID}, 752 {"MCP Latched scratchpad", ATTENTION_PAR, 753 NULL, MAX_BLOCK_ID}, 754 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 755 NULL, MAX_BLOCK_ID}, 756 } 757 }, 758 }; 759 760 static struct aeu_invert_reg_bit * 761 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 762 struct aeu_invert_reg_bit *p_bit) 763 { 764 if (!QED_IS_BB(p_hwfn->cdev)) 765 return p_bit; 766 767 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 768 return p_bit; 769 770 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 771 ATTENTION_BB_SHIFT]; 772 } 773 774 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 775 struct aeu_invert_reg_bit *p_bit) 776 { 777 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 778 ATTENTION_PARITY); 779 } 780 781 #define ATTN_STATE_BITS (0xfff) 782 #define ATTN_BITS_MASKABLE (0x3ff) 783 struct qed_sb_attn_info { 784 /* Virtual & Physical address of the SB */ 785 struct atten_status_block *sb_attn; 786 dma_addr_t sb_phys; 787 788 /* Last seen running index */ 789 u16 index; 790 791 /* A mask of the AEU bits resulting in a parity error */ 792 u32 parity_mask[NUM_ATTN_REGS]; 793 794 /* A pointer to the attention description structure */ 795 struct aeu_invert_reg *p_aeu_desc; 796 797 /* Previously asserted attentions, which are still unasserted */ 798 u16 known_attn; 799 800 /* Cleanup address for the link's general hw attention */ 801 u32 mfw_attn_addr; 802 }; 803 804 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 805 struct qed_sb_attn_info *p_sb_desc) 806 { 807 u16 rc = 0, index; 808 809 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 810 if (p_sb_desc->index != index) { 811 p_sb_desc->index = index; 812 rc = QED_SB_ATT_IDX; 813 } 814 815 return rc; 816 } 817 818 /** 819 * qed_int_assertion() - Handle asserted attention bits. 820 * 821 * @p_hwfn: HW device data. 822 * @asserted_bits: Newly asserted bits. 823 * 824 * Return: Zero value. 
/**
 * qed_int_assertion() - Handle asserted attention bits.
 *
 * @p_hwfn: HW device data.
 * @asserted_bits: Newly asserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}

static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * qed_int_deassertion_aeu_bit() - Handles the effects of a single
 * cause of the attention.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the attention.
 * @aeu_en_reg: Register offset of the AEU enable reg. which configured
 *              this bit to this group.
 * @p_bit_name: AEU bit description for logging purposes.
 * @bitmask: Mask of this bit within the aeu_en_reg.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* Reach assertion if attention is fatal */
	if (b_fatal)
		qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
				  "`%s': Fatal attention\n",
				  p_bit_name);
	else /* If the attention is benign, no need to prevent it */
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}

/**
 * qed_int_deassertion_parity() - Handle a single parity AEU source.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the parity.
 * @aeu_en_reg: Address of the AEU enable register.
 * @bit_index: Index (0-31) of an AEU bit.
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
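
/* Deassertion is processed in two passes: parity sources first, across all
 * NUM_ATTN_REGS after-invert registers (using parity_mask), and only then
 * the regular interrupt causes, group by group for each of the
 * MAX_ATTN_GRPS deasserted groups.
 */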
/**
 * qed_int_deassertion() - Handle deassertion of previously asserted
 * attentions.
 *
 * @p_hwfn: HW device data.
 * @deasserted_bits: newly deasserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Handle missed DORQ attention */
	qed_dorq_attn_handler(p_hwfn);

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
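
/* The ack below is a single IGU producer/consumer update command: the
 * attention index goes into the SB_INDEX field, UPDATE_FLAG marks the write
 * as valid, IGU_INT_NOP leaves the interrupt enable state unchanged, and
 * IGU_SEG_ACCESS_ATTN selects the attention segment of the status block.
 */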
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	u32 igu_ack;

	igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_ATTN <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}

void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for MSI-X and
	 * for INTA in non-mask mode; in INTA it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Call any registered protocol-index callbacks */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
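
/* Worked example of the timer_res/timeset encoding used below: timeset is
 * 7 bits wide, so the smallest timer_res keeping (usecs >> timer_res) <= 0x7F
 * is chosen. E.g. rx_coalesce_usecs = 200 falls in the 0x80-0xFF range, so
 * timer_res = 1 and qed_int_cau_conf_sb() programs timeset = 200 >> 1 = 100.
 */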
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state, params = 0, data = 0;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;

	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;

	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	p_sb_entry->params = cpu_to_le32(params);

	SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
	p_sb_entry->data = cpu_to_le32(data);
}

static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	u32 sb_offset, pi_offset;
	u32 prod = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	pi_offset = sb_offset + pi_index;

	if (p_hwfn->hw_init_done)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
	else
		STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     prod);
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}

struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}

u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
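
/* IGU SB mapping summary: QED_SP_SB_ID resolves to the default SB
 * (igu_dsb_id); for a PF, vector 0 is the default SB, so client SB 'sb_id'
 * maps to IGU vector sb_id + 1; a VF queries its mapping from the PF.
 */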
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}

int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}

int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}

int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (!p_sp_sb->pi_info_arr[pi].comp_cb)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}
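
/* Illustrative (hypothetical) use of the callback API above - a slowpath
 * client registers a completion handler for one protocol index:
 *
 *	static void my_comp_cb(struct qed_hwfn *p_hwfn, void *cookie)
 *	{
 *		... consume completions up to le16_to_cpu(*my_fw_cons) ...
 *	}
 *
 *	u8 sb_idx;
 *	__le16 *my_fw_cons;
 *	int rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_ctx,
 *				     &sb_idx, &my_fw_cons);
 *
 * The callback then runs from qed_int_sp_dpc() whenever the slowpath SB
 * index advances; qed_int_unregister_cb(p_hwfn, sb_idx) releases the slot.
 */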
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH	(1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to clean up */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed to clean SB [0x%08x]: still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}

void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
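/**
 * qed_int_igu_reset_cam() - Reset the IGU CAM to its initial PF/VF layout.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Validates the SB counts against the MFW-provided resources, marks all
 * SBs as free, and rewrites every valid CAM line (default SB first, then
 * PF SBs, then VF SBs).
 *
 * Return: 0 on success, negative errno otherwise.
 */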
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided - don't forget that
		 * the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have in
			 * total in the IGU and how many of them are PF SBs,
			 * so we can validate there are enough left for the
			 * VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs in total, of which %04x are needed for the PF and %04x for the VFs\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VF SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now with the right PF/VF division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU CAM to reflect the initial
	 * configuration. We can start with the default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries will be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}
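/* Decode a single IGU CAM line into its qed_igu_block entry. Each line
 * packs the owning function number, a PF-valid flag and the vector
 * number via the IGU_MAPPING_LINE_* fields.
 */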
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
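/**
 * qed_int_igu_read_cam() - Read and parse the IGU CAM for this PF.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Allocates the IGU info, walks the mapping memory, marks the entries
 * belonging to this PF and its VFs, and selects the default SB.
 *
 * Return: 0 on success, negative errno otherwise.
 */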
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between an existent and a non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read the current entry; note it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for the VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset the IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* Limit the number of prints by having each PF print only
		 * its own entries, with the exception of PF0 which prints
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

/**
 * qed_int_igu_init_rt() - Initialize IGU runtime registers.
 *
 * @p_hwfn: HW device data.
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}

int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
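/* Clear b_int_requested on all hwfns once the slowpath ISRs have been
 * released, so that a subsequent qed_int_igu_enable() will request the
 * IRQs again.
 */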
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}

void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
{
	cdev->attn_clr_en = clr_enable;
}

int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	u32 params;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	params = le32_to_cpu(sb_entry.params);

	if (tx)
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	sb_entry.params = cpu_to_le32(params);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
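/* Example (illustrative only, not part of the driver): a caller updating
 * the Rx coalescing timer resolution of status block 5 to 0x2, assuming
 * a PTT window was already acquired:
 *
 *	rc = qed_int_set_timer_res(p_hwfn, p_ptt, 0x2, 5, false);
 */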