1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/bitops.h> 37 #include <linux/delay.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/errno.h> 40 #include <linux/interrupt.h> 41 #include <linux/kernel.h> 42 #include <linux/pci.h> 43 #include <linux/slab.h> 44 #include <linux/string.h> 45 #include "qed.h" 46 #include "qed_hsi.h" 47 #include "qed_hw.h" 48 #include "qed_init_ops.h" 49 #include "qed_int.h" 50 #include "qed_mcp.h" 51 #include "qed_reg_addr.h" 52 #include "qed_sp.h" 53 #include "qed_sriov.h" 54 #include "qed_vf.h" 55 56 struct qed_pi_info { 57 qed_int_comp_cb_t comp_cb; 58 void *cookie; 59 }; 60 61 struct qed_sb_sp_info { 62 struct qed_sb_info sb_info; 63 64 /* per protocol index data */ 65 struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; 66 }; 67 68 enum qed_attention_type { 69 QED_ATTN_TYPE_ATTN, 70 QED_ATTN_TYPE_PARITY, 71 }; 72 73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 75 76 struct aeu_invert_reg_bit { 77 char bit_name[30]; 78 79 #define ATTENTION_PARITY (1 << 0) 80 81 #define ATTENTION_LENGTH_MASK (0x00000ff0) 82 #define ATTENTION_LENGTH_SHIFT (4) 83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 84 ATTENTION_LENGTH_SHIFT) 85 #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) 86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 88 ATTENTION_PARITY) 89 90 /* Multiple bits start with this offset */ 91 #define ATTENTION_OFFSET_MASK (0x000ff000) 92 #define ATTENTION_OFFSET_SHIFT (12) 93 94 #define ATTENTION_BB_MASK (0x00700000) 95 #define ATTENTION_BB_SHIFT (20) 96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 97 #define ATTENTION_BB_DIFFERENT BIT(23) 98 99 unsigned int flags; 100 101 /* Callback to call if attention will be triggered */ 102 int (*cb)(struct qed_hwfn *p_hwfn); 103 104 enum block_id block_index; 105 }; 106 107 struct aeu_invert_reg { 108 struct aeu_invert_reg_bit bits[32]; 109 }; 110 111 #define MAX_ATTN_GRPS (8) 112 #define NUM_ATTN_REGS (9) 113 114 /* Specific HW attention callbacks */ 115 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 116 { 117 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 118 119 /* This might occur on certain instances; Log it once then mask it */ 120 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 121 tmp); 122 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 123 0xffffffff); 124 125 return 0; 126 } 127 128 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 129 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 130 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 131 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 133 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 135 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 137 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 139 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 141 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 142 { 143 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 144 PSWHST_REG_INCORRECT_ACCESS_VALID); 145 146 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 147 u32 addr, data, length; 148 149 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 150 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 151 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 152 PSWHST_REG_INCORRECT_ACCESS_DATA); 153 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 154 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 155 156 DP_INFO(p_hwfn->cdev, 157 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 158 addr, length, 159 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 161 (u8) GET_FIELD(data, 162 ATTENTION_INCORRECT_ACCESS_VF_VALID), 163 (u8) GET_FIELD(data, 164 ATTENTION_INCORRECT_ACCESS_CLIENT), 165 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 166 (u8) GET_FIELD(data, 167 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 168 data); 169 } 170 171 return 0; 172 } 173 174 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 175 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 176 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 177 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 178 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 179 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 180 #define QED_GRC_ATTENTION_PF_MASK (0xf) 181 #define QED_GRC_ATTENTION_PF_SHIFT (0) 182 #define QED_GRC_ATTENTION_VF_MASK (0xff) 183 #define QED_GRC_ATTENTION_VF_SHIFT (4) 184 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 185 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 186 #define QED_GRC_ATTENTION_PRIV_VF (0) 187 static const char *attn_master_to_str(u8 master) 188 { 189 switch (master) { 190 case 1: return "PXP"; 191 case 2: return "MCP"; 192 case 3: return "MSDM"; 193 case 4: return "PSDM"; 194 case 5: return "YSDM"; 195 case 6: return "USDM"; 196 case 7: return "TSDM"; 197 case 8: return "XSDM"; 198 case 9: return "DBU"; 199 case 10: return "DMAE"; 200 default: 201 return "Unknown"; 202 } 203 } 204 205 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 206 { 207 u32 tmp, tmp2; 208 209 /* We've already cleared the timeout interrupt register, so we learn 210 * of interrupts via the validity register 211 */ 212 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 213 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 214 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 215 goto out; 216 217 /* Read the GRC timeout information */ 218 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 219 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 220 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 221 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 222 223 DP_INFO(p_hwfn->cdev, 224 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 225 tmp2, tmp, 226 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 227 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)", 232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 233 234 out: 235 /* Regardles of anything else, clean the validity bit */ 236 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 237 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 238 return 0; 239 } 240 241 #define PGLUE_ATTENTION_VALID (1 << 29) 242 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 243 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 244 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 245 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 247 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 248 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 249 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 251 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 252 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 253 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 255 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 256 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 257 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 258 259 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 260 struct qed_ptt *p_ptt) 261 { 262 u32 tmp; 263 264 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 265 if (tmp & PGLUE_ATTENTION_VALID) { 266 u32 addr_lo, addr_hi, details; 267 268 addr_lo = qed_rd(p_hwfn, p_ptt, 269 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 270 addr_hi = qed_rd(p_hwfn, p_ptt, 271 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 272 details = qed_rd(p_hwfn, p_ptt, 273 PGLUE_B_REG_TX_ERR_WR_DETAILS); 274 275 DP_NOTICE(p_hwfn, 276 "Illegal write by chip to [%08x:%08x] blocked.\n" 277 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 278 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 279 addr_hi, addr_lo, details, 280 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 281 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 282 GET_FIELD(details, 283 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 284 tmp, 285 GET_FIELD(tmp, 286 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 287 GET_FIELD(tmp, 288 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 289 GET_FIELD(tmp, 290 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 291 } 292 293 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 294 if (tmp & PGLUE_ATTENTION_RD_VALID) { 295 u32 addr_lo, addr_hi, details; 296 297 addr_lo = qed_rd(p_hwfn, p_ptt, 298 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 299 addr_hi = qed_rd(p_hwfn, p_ptt, 300 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 301 details = qed_rd(p_hwfn, p_ptt, 302 PGLUE_B_REG_TX_ERR_RD_DETAILS); 303 304 DP_NOTICE(p_hwfn, 305 "Illegal read by chip from [%08x:%08x] blocked.\n" 306 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 307 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 308 addr_hi, addr_lo, details, 309 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 310 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 311 GET_FIELD(details, 312 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 313 tmp, 314 GET_FIELD(tmp, 315 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 316 GET_FIELD(tmp, 317 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 318 GET_FIELD(tmp, 319 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 320 } 321 322 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 323 if (tmp & PGLUE_ATTENTION_ICPL_VALID) 324 DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); 325 326 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 327 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 328 u32 addr_hi, addr_lo; 329 330 addr_lo = qed_rd(p_hwfn, p_ptt, 331 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 332 addr_hi = qed_rd(p_hwfn, p_ptt, 333 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 334 335 DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", 336 tmp, addr_hi, addr_lo); 337 } 338 339 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 340 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 341 u32 addr_hi, addr_lo, details; 342 343 addr_lo = qed_rd(p_hwfn, p_ptt, 344 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 345 addr_hi = qed_rd(p_hwfn, p_ptt, 346 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 347 details = qed_rd(p_hwfn, p_ptt, 348 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 349 350 DP_NOTICE(p_hwfn, 351 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 352 details, tmp, addr_hi, addr_lo); 353 } 354 355 /* Clear the indications */ 356 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); 357 358 return 0; 359 } 360 361 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) 362 { 363 return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 364 } 365 366 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 367 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 368 #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 369 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 370 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 371 372 #define QED_DB_REC_COUNT 1000 373 #define QED_DB_REC_INTERVAL 100 374 375 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, 376 struct qed_ptt *p_ptt) 377 { 378 u32 count = QED_DB_REC_COUNT; 379 u32 usage = 1; 380 381 /* Flush any pending (e)dpms as they may never arrive */ 382 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 383 384 /* wait for usage to zero or count to run out. This is necessary since 385 * EDPM doorbell transactions can take multiple 64b cycles, and as such 386 * can "split" over the pci. Possibly, the doorbell drop can happen with 387 * half an EDPM in the queue and other half dropped. Another EDPM 388 * doorbell to the same address (from doorbell recovery mechanism or 389 * from the doorbelling entity) could have first half dropped and second 390 * half interpreted as continuation of the first. To prevent such 391 * malformed doorbells from reaching the device, flush the queue before 392 * releasing the overflow sticky indication. 393 */ 394 while (count-- && usage) { 395 usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 396 udelay(QED_DB_REC_INTERVAL); 397 } 398 399 /* should have been depleted by now */ 400 if (usage) { 401 DP_NOTICE(p_hwfn->cdev, 402 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 403 QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); 404 return -EBUSY; 405 } 406 407 return 0; 408 } 409 410 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 411 { 412 u32 attn_ovfl, cur_ovfl; 413 int rc; 414 415 attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, 416 &p_hwfn->db_recovery_info.overflow); 417 cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 418 if (!cur_ovfl && !attn_ovfl) 419 return 0; 420 421 DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", 422 attn_ovfl, cur_ovfl); 423 424 if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { 425 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 426 if (rc) 427 return rc; 428 } 429 430 /* Release overflow sticky indication (stop silently dropping everything) */ 431 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 432 433 /* Repeat all last doorbells (doorbell drop recovery) */ 434 qed_db_recovery_execute(p_hwfn); 435 436 return 0; 437 } 438 439 static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) 440 { 441 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 442 u32 overflow; 443 int rc; 444 445 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 446 if (!overflow) 447 goto out; 448 449 /* Run PF doorbell recovery in next periodic handler */ 450 set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); 451 452 if (!p_hwfn->db_bar_no_edpm) { 453 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 454 if (rc) 455 goto out; 456 } 457 458 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 459 out: 460 /* Schedule the handler even if overflow was not detected */ 461 qed_periodic_db_rec_start(p_hwfn); 462 } 463 464 static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) 465 { 466 u32 int_sts, first_drop_reason, details, address, all_drops_reason; 467 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 468 469 /* int_sts may be zero since all PFs were interrupted for doorbell 470 * overflow but another one already handled it. Can abort here. If 471 * This PF also requires overflow recovery we will be interrupted again. 472 * The masked almost full indication may also be set. Ignoring. 473 */ 474 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 475 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 476 return 0; 477 478 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); 479 480 /* check if db_drop or overflow happened */ 481 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 482 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 483 /* Obtain data about db drop/overflow */ 484 first_drop_reason = qed_rd(p_hwfn, p_ptt, 485 DORQ_REG_DB_DROP_REASON) & 486 QED_DORQ_ATTENTION_REASON_MASK; 487 details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); 488 address = qed_rd(p_hwfn, p_ptt, 489 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 490 all_drops_reason = qed_rd(p_hwfn, p_ptt, 491 DORQ_REG_DB_DROP_DETAILS_REASON); 492 493 /* Log info */ 494 DP_NOTICE(p_hwfn->cdev, 495 "Doorbell drop occurred\n" 496 "Address\t\t0x%08x\t(second BAR address)\n" 497 "FID\t\t0x%04x\t\t(Opaque FID)\n" 498 "Size\t\t0x%04x\t\t(in bytes)\n" 499 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 500 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", 501 address, 502 GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), 503 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 504 first_drop_reason, all_drops_reason); 505 506 /* Clear the doorbell drop details and prepare for next drop */ 507 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 508 509 /* Mark interrupt as handled (note: even if drop was due to a different 510 * reason than overflow we mark as handled) 511 */ 512 qed_wr(p_hwfn, 513 p_ptt, 514 DORQ_REG_INT_STS_WR, 515 DORQ_REG_INT_STS_DB_DROP | 516 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 517 518 /* If there are no indications other than drop indications, success */ 519 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 520 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 521 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 522 return 0; 523 } 524 525 /* Some other indication was present - non recoverable */ 526 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 527 528 return -EINVAL; 529 } 530 531 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 532 { 533 p_hwfn->db_recovery_info.dorq_attn = true; 534 qed_dorq_attn_overflow(p_hwfn); 535 536 return qed_dorq_attn_int_sts(p_hwfn); 537 } 538 539 static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) 540 { 541 if (p_hwfn->db_recovery_info.dorq_attn) 542 goto out; 543 544 /* Call DORQ callback if the attention was missed */ 545 qed_dorq_attn_cb(p_hwfn); 546 out: 547 p_hwfn->db_recovery_info.dorq_attn = false; 548 } 549 550 /* Instead of major changes to the data-structure, we have a some 'special' 551 * identifiers for sources that changed meaning between adapters. 552 */ 553 enum aeu_invert_reg_special_type { 554 AEU_INVERT_REG_SPECIAL_CNIG_0, 555 AEU_INVERT_REG_SPECIAL_CNIG_1, 556 AEU_INVERT_REG_SPECIAL_CNIG_2, 557 AEU_INVERT_REG_SPECIAL_CNIG_3, 558 AEU_INVERT_REG_SPECIAL_MAX, 559 }; 560 561 static struct aeu_invert_reg_bit 562 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 563 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 564 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 565 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 566 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 567 }; 568 569 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 570 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 571 { 572 { /* After Invert 1 */ 573 {"GPIO0 function%d", 574 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 575 } 576 }, 577 578 { 579 { /* After Invert 2 */ 580 {"PGLUE config_space", ATTENTION_SINGLE, 581 NULL, MAX_BLOCK_ID}, 582 {"PGLUE misc_flr", ATTENTION_SINGLE, 583 NULL, MAX_BLOCK_ID}, 584 {"PGLUE B RBC", ATTENTION_PAR_INT, 585 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 586 {"PGLUE misc_mctp", ATTENTION_SINGLE, 587 NULL, MAX_BLOCK_ID}, 588 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 589 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 590 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 591 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 592 (1 << ATTENTION_OFFSET_SHIFT), 593 NULL, MAX_BLOCK_ID}, 594 {"PCIE glue/PXP VPD %d", 595 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 596 } 597 }, 598 599 { 600 { /* After Invert 3 */ 601 {"General Attention %d", 602 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 603 } 604 }, 605 606 { 607 { /* After Invert 4 */ 608 {"General Attention 32", ATTENTION_SINGLE, 609 NULL, MAX_BLOCK_ID}, 610 {"General Attention %d", 611 (2 << ATTENTION_LENGTH_SHIFT) | 612 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 613 {"General Attention 35", ATTENTION_SINGLE, 614 NULL, MAX_BLOCK_ID}, 615 {"NWS Parity", 616 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 617 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 618 NULL, BLOCK_NWS}, 619 {"NWS Interrupt", 620 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 621 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 622 NULL, BLOCK_NWS}, 623 {"NWM Parity", 624 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 625 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 626 NULL, BLOCK_NWM}, 627 {"NWM Interrupt", 628 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 629 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 630 NULL, BLOCK_NWM}, 631 {"MCP CPU", ATTENTION_SINGLE, 632 qed_mcp_attn_cb, MAX_BLOCK_ID}, 633 {"MCP Watchdog timer", ATTENTION_SINGLE, 634 NULL, MAX_BLOCK_ID}, 635 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 636 {"AVS stop status ready", ATTENTION_SINGLE, 637 NULL, MAX_BLOCK_ID}, 638 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 639 {"MSTAT per-path", ATTENTION_PAR_INT, 640 NULL, MAX_BLOCK_ID}, 641 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 642 NULL, MAX_BLOCK_ID}, 643 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 644 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 645 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 646 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 647 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 648 } 649 }, 650 651 { 652 { /* After Invert 5 */ 653 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 654 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 655 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 656 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 657 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 658 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 659 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 660 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 661 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 662 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 663 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 664 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 665 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 666 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 667 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 668 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 669 } 670 }, 671 672 { 673 { /* After Invert 6 */ 674 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 675 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 676 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 677 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 678 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 679 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 680 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 681 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, 682 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 683 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 684 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 685 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 686 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 687 {"DORQ", ATTENTION_PAR_INT, 688 qed_dorq_attn_cb, BLOCK_DORQ}, 689 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 690 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 691 } 692 }, 693 694 { 695 { /* After Invert 7 */ 696 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 697 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 698 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 699 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 700 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 701 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 702 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 703 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 704 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 705 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 706 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 707 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 708 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 709 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 710 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 711 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 712 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 713 } 714 }, 715 716 { 717 { /* After Invert 8 */ 718 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 719 NULL, BLOCK_PSWRQ2}, 720 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 721 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 722 NULL, BLOCK_PSWWR2}, 723 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 724 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 725 NULL, BLOCK_PSWRD2}, 726 {"PSWHST", ATTENTION_PAR_INT, 727 qed_pswhst_attn_cb, BLOCK_PSWHST}, 728 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 729 NULL, BLOCK_PSWHST2}, 730 {"GRC", ATTENTION_PAR_INT, 731 qed_grc_attn_cb, BLOCK_GRC}, 732 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 733 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 734 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 735 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 736 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 737 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 738 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 739 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 740 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 741 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, 742 NULL, BLOCK_PGLCS}, 743 {"PERST_B assertion", ATTENTION_SINGLE, 744 NULL, MAX_BLOCK_ID}, 745 {"PERST_B deassertion", ATTENTION_SINGLE, 746 NULL, MAX_BLOCK_ID}, 747 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 748 NULL, MAX_BLOCK_ID}, 749 } 750 }, 751 752 { 753 { /* After Invert 9 */ 754 {"MCP Latched memory", ATTENTION_PAR, 755 NULL, MAX_BLOCK_ID}, 756 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 757 NULL, MAX_BLOCK_ID}, 758 {"MCP Latched ump_tx", ATTENTION_PAR, 759 NULL, MAX_BLOCK_ID}, 760 {"MCP Latched scratchpad", ATTENTION_PAR, 761 NULL, MAX_BLOCK_ID}, 762 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 763 NULL, MAX_BLOCK_ID}, 764 } 765 }, 766 }; 767 768 static struct aeu_invert_reg_bit * 769 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 770 struct aeu_invert_reg_bit *p_bit) 771 { 772 if (!QED_IS_BB(p_hwfn->cdev)) 773 return p_bit; 774 775 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 776 return p_bit; 777 778 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 779 ATTENTION_BB_SHIFT]; 780 } 781 782 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 783 struct aeu_invert_reg_bit *p_bit) 784 { 785 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 786 ATTENTION_PARITY); 787 } 788 789 #define ATTN_STATE_BITS (0xfff) 790 #define ATTN_BITS_MASKABLE (0x3ff) 791 struct qed_sb_attn_info { 792 /* Virtual & Physical address of the SB */ 793 struct atten_status_block *sb_attn; 794 dma_addr_t sb_phys; 795 796 /* Last seen running index */ 797 u16 index; 798 799 /* A mask of the AEU bits resulting in a parity error */ 800 u32 parity_mask[NUM_ATTN_REGS]; 801 802 /* A pointer to the attention description structure */ 803 struct aeu_invert_reg *p_aeu_desc; 804 805 /* Previously asserted attentions, which are still unasserted */ 806 u16 known_attn; 807 808 /* Cleanup address for the link's general hw attention */ 809 u32 mfw_attn_addr; 810 }; 811 812 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 813 struct qed_sb_attn_info *p_sb_desc) 814 { 815 u16 rc = 0, index; 816 817 /* Make certain HW write took affect */ 818 mmiowb(); 819 820 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 821 if (p_sb_desc->index != index) { 822 p_sb_desc->index = index; 823 rc = QED_SB_ATT_IDX; 824 } 825 826 /* Make certain we got a consistent view with HW */ 827 mmiowb(); 828 829 return rc; 830 } 831 832 /** 833 * @brief qed_int_assertion - handles asserted attention bits 834 * 835 * @param p_hwfn 836 * @param asserted_bits newly asserted bits 837 * @return int 838 */ 839 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) 840 { 841 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 842 u32 igu_mask; 843 844 /* Mask the source of the attention in the IGU */ 845 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 846 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 847 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 848 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 849 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 850 851 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 852 "inner known ATTN state: 0x%04x --> 0x%04x\n", 853 sb_attn_sw->known_attn, 854 sb_attn_sw->known_attn | asserted_bits); 855 sb_attn_sw->known_attn |= asserted_bits; 856 857 /* Handle MCP events */ 858 if (asserted_bits & 0x100) { 859 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 860 /* Clean the MCP attention */ 861 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 862 sb_attn_sw->mfw_attn_addr, 0); 863 } 864 865 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 866 GTT_BAR0_MAP_REG_IGU_CMD + 867 ((IGU_CMD_ATTN_BIT_SET_UPPER - 868 IGU_CMD_INT_ACK_BASE) << 3), 869 (u32)asserted_bits); 870 871 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 872 asserted_bits); 873 874 return 0; 875 } 876 877 static void qed_int_attn_print(struct qed_hwfn *p_hwfn, 878 enum block_id id, 879 enum dbg_attn_type type, bool b_clear) 880 { 881 struct dbg_attn_block_result attn_results; 882 enum dbg_status status; 883 884 memset(&attn_results, 0, sizeof(attn_results)); 885 886 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 887 b_clear, &attn_results); 888 if (status != DBG_STATUS_OK) 889 DP_NOTICE(p_hwfn, 890 "Failed to parse attention information [status: %s]\n", 891 qed_dbg_get_status_str(status)); 892 else 893 qed_dbg_parse_attn(p_hwfn, &attn_results); 894 } 895 896 /** 897 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single 898 * cause of the attention 899 * 900 * @param p_hwfn 901 * @param p_aeu - descriptor of an AEU bit which caused the attention 902 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 903 * this bit to this group. 904 * @param bit_index - index of this bit in the aeu_en_reg 905 * 906 * @return int 907 */ 908 static int 909 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, 910 struct aeu_invert_reg_bit *p_aeu, 911 u32 aeu_en_reg, 912 const char *p_bit_name, u32 bitmask) 913 { 914 bool b_fatal = false; 915 int rc = -EINVAL; 916 u32 val; 917 918 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 919 p_bit_name, bitmask); 920 921 /* Call callback before clearing the interrupt status */ 922 if (p_aeu->cb) { 923 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 924 p_bit_name); 925 rc = p_aeu->cb(p_hwfn); 926 } 927 928 if (rc) 929 b_fatal = true; 930 931 /* Print HW block interrupt registers */ 932 if (p_aeu->block_index != MAX_BLOCK_ID) 933 qed_int_attn_print(p_hwfn, p_aeu->block_index, 934 ATTN_TYPE_INTERRUPT, !b_fatal); 935 936 937 /* If the attention is benign, no need to prevent it */ 938 if (!rc) 939 goto out; 940 941 /* Prevent this Attention from being asserted in the future */ 942 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 943 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 944 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 945 p_bit_name); 946 947 out: 948 return rc; 949 } 950 951 /** 952 * @brief qed_int_deassertion_parity - handle a single parity AEU source 953 * 954 * @param p_hwfn 955 * @param p_aeu - descriptor of an AEU bit which caused the parity 956 * @param aeu_en_reg - address of the AEU enable register 957 * @param bit_index 958 */ 959 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 960 struct aeu_invert_reg_bit *p_aeu, 961 u32 aeu_en_reg, u8 bit_index) 962 { 963 u32 block_id = p_aeu->block_index, mask, val; 964 965 DP_NOTICE(p_hwfn->cdev, 966 "%s parity attention is set [address 0x%08x, bit %d]\n", 967 p_aeu->bit_name, aeu_en_reg, bit_index); 968 969 if (block_id != MAX_BLOCK_ID) { 970 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 971 972 /* In BB, there's a single parity bit for several blocks */ 973 if (block_id == BLOCK_BTB) { 974 qed_int_attn_print(p_hwfn, BLOCK_OPTE, 975 ATTN_TYPE_PARITY, false); 976 qed_int_attn_print(p_hwfn, BLOCK_MCP, 977 ATTN_TYPE_PARITY, false); 978 } 979 } 980 981 /* Prevent this parity error from being re-asserted */ 982 mask = ~BIT(bit_index); 983 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 984 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 985 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 986 p_aeu->bit_name); 987 } 988 989 /** 990 * @brief - handles deassertion of previously asserted attentions. 991 * 992 * @param p_hwfn 993 * @param deasserted_bits - newly deasserted bits 994 * @return int 995 * 996 */ 997 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 998 u16 deasserted_bits) 999 { 1000 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1001 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1002 u8 i, j, k, bit_idx; 1003 int rc = 0; 1004 1005 /* Read the attention registers in the AEU */ 1006 for (i = 0; i < NUM_ATTN_REGS; i++) { 1007 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1008 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1009 i * 0x4); 1010 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1011 "Deasserted bits [%d]: %08x\n", 1012 i, aeu_inv_arr[i]); 1013 } 1014 1015 /* Find parity attentions first */ 1016 for (i = 0; i < NUM_ATTN_REGS; i++) { 1017 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1018 u32 parities; 1019 1020 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1021 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1022 1023 /* Skip register in which no parity bit is currently set */ 1024 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1025 if (!parities) 1026 continue; 1027 1028 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1029 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1030 1031 if (qed_int_is_parity_flag(p_hwfn, p_bit) && 1032 !!(parities & BIT(bit_idx))) 1033 qed_int_deassertion_parity(p_hwfn, p_bit, 1034 aeu_en, bit_idx); 1035 1036 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1037 } 1038 } 1039 1040 /* Find non-parity cause for attention and act */ 1041 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1042 struct aeu_invert_reg_bit *p_aeu; 1043 1044 /* Handle only groups whose attention is currently deasserted */ 1045 if (!(deasserted_bits & (1 << k))) 1046 continue; 1047 1048 for (i = 0; i < NUM_ATTN_REGS; i++) { 1049 u32 bits; 1050 1051 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1052 i * sizeof(u32) + 1053 k * sizeof(u32) * NUM_ATTN_REGS; 1054 1055 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1056 bits = aeu_inv_arr[i] & en; 1057 1058 /* Skip if no bit from this group is currently set */ 1059 if (!bits) 1060 continue; 1061 1062 /* Find all set bits from current register which belong 1063 * to current group, making them responsible for the 1064 * previous assertion. 1065 */ 1066 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1067 long unsigned int bitmask; 1068 u8 bit, bit_len; 1069 1070 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1071 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); 1072 1073 bit = bit_idx; 1074 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1075 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { 1076 /* Skip Parity */ 1077 bit++; 1078 bit_len--; 1079 } 1080 1081 bitmask = bits & (((1 << bit_len) - 1) << bit); 1082 bitmask >>= bit; 1083 1084 if (bitmask) { 1085 u32 flags = p_aeu->flags; 1086 char bit_name[30]; 1087 u8 num; 1088 1089 num = (u8)find_first_bit(&bitmask, 1090 bit_len); 1091 1092 /* Some bits represent more than a 1093 * a single interrupt. Correctly print 1094 * their name. 1095 */ 1096 if (ATTENTION_LENGTH(flags) > 2 || 1097 ((flags & ATTENTION_PAR_INT) && 1098 ATTENTION_LENGTH(flags) > 1)) 1099 snprintf(bit_name, 30, 1100 p_aeu->bit_name, num); 1101 else 1102 strncpy(bit_name, 1103 p_aeu->bit_name, 30); 1104 1105 /* We now need to pass bitmask in its 1106 * correct position. 1107 */ 1108 bitmask <<= bit; 1109 1110 /* Handle source of the attention */ 1111 qed_int_deassertion_aeu_bit(p_hwfn, 1112 p_aeu, 1113 aeu_en, 1114 bit_name, 1115 bitmask); 1116 } 1117 1118 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1119 } 1120 } 1121 } 1122 1123 /* Handle missed DORQ attention */ 1124 qed_dorq_attn_handler(p_hwfn); 1125 1126 /* Clear IGU indication for the deasserted bits */ 1127 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1128 GTT_BAR0_MAP_REG_IGU_CMD + 1129 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1130 IGU_CMD_INT_ACK_BASE) << 3), 1131 ~((u32)deasserted_bits)); 1132 1133 /* Unmask deasserted attentions in IGU */ 1134 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 1135 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1136 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1137 1138 /* Clear deassertion from inner state */ 1139 sb_attn_sw->known_attn &= ~deasserted_bits; 1140 1141 return rc; 1142 } 1143 1144 static int qed_int_attentions(struct qed_hwfn *p_hwfn) 1145 { 1146 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1147 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1148 u32 attn_bits = 0, attn_acks = 0; 1149 u16 asserted_bits, deasserted_bits; 1150 __le16 index; 1151 int rc = 0; 1152 1153 /* Read current attention bits/acks - safeguard against attentions 1154 * by guaranting work on a synchronized timeframe 1155 */ 1156 do { 1157 index = p_sb_attn->sb_index; 1158 /* finish reading index before the loop condition */ 1159 dma_rmb(); 1160 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 1161 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 1162 } while (index != p_sb_attn->sb_index); 1163 p_sb_attn->sb_index = index; 1164 1165 /* Attention / Deassertion are meaningful (and in correct state) 1166 * only when they differ and consistent with known state - deassertion 1167 * when previous attention & current ack, and assertion when current 1168 * attention with no previous attention 1169 */ 1170 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1171 ~p_sb_attn_sw->known_attn; 1172 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1173 p_sb_attn_sw->known_attn; 1174 1175 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { 1176 DP_INFO(p_hwfn, 1177 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1178 index, attn_bits, attn_acks, asserted_bits, 1179 deasserted_bits, p_sb_attn_sw->known_attn); 1180 } else if (asserted_bits == 0x100) { 1181 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1182 } else { 1183 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1184 "MFW indication [deassertion]\n"); 1185 } 1186 1187 if (asserted_bits) { 1188 rc = qed_int_assertion(p_hwfn, asserted_bits); 1189 if (rc) 1190 return rc; 1191 } 1192 1193 if (deasserted_bits) 1194 rc = qed_int_deassertion(p_hwfn, deasserted_bits); 1195 1196 return rc; 1197 } 1198 1199 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, 1200 void __iomem *igu_addr, u32 ack_cons) 1201 { 1202 struct igu_prod_cons_update igu_ack = { 0 }; 1203 1204 igu_ack.sb_id_and_flags = 1205 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1206 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1207 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1208 (IGU_SEG_ACCESS_ATTN << 1209 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1210 1211 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); 1212 1213 /* Both segments (interrupts & acks) are written to same place address; 1214 * Need to guarantee all commands will be received (in-order) by HW. 1215 */ 1216 mmiowb(); 1217 barrier(); 1218 } 1219 1220 void qed_int_sp_dpc(unsigned long hwfn_cookie) 1221 { 1222 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; 1223 struct qed_pi_info *pi_info = NULL; 1224 struct qed_sb_attn_info *sb_attn; 1225 struct qed_sb_info *sb_info; 1226 int arr_size; 1227 u16 rc = 0; 1228 1229 if (!p_hwfn->p_sp_sb) { 1230 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 1231 return; 1232 } 1233 1234 sb_info = &p_hwfn->p_sp_sb->sb_info; 1235 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1236 if (!sb_info) { 1237 DP_ERR(p_hwfn->cdev, 1238 "Status block is NULL - cannot ack interrupts\n"); 1239 return; 1240 } 1241 1242 if (!p_hwfn->p_sb_attn) { 1243 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); 1244 return; 1245 } 1246 sb_attn = p_hwfn->p_sb_attn; 1247 1248 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1249 p_hwfn, p_hwfn->my_id); 1250 1251 /* Disable ack for def status block. Required both for msix + 1252 * inta in non-mask mode, in inta does no harm. 1253 */ 1254 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1255 1256 /* Gather Interrupts/Attentions information */ 1257 if (!sb_info->sb_virt) { 1258 DP_ERR(p_hwfn->cdev, 1259 "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1260 } else { 1261 u32 tmp_index = sb_info->sb_ack; 1262 1263 rc = qed_sb_update_sb_idx(sb_info); 1264 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1265 "Interrupt indices: 0x%08x --> 0x%08x\n", 1266 tmp_index, sb_info->sb_ack); 1267 } 1268 1269 if (!sb_attn || !sb_attn->sb_attn) { 1270 DP_ERR(p_hwfn->cdev, 1271 "Attentions Status block is NULL - cannot check for new attentions!\n"); 1272 } else { 1273 u16 tmp_index = sb_attn->index; 1274 1275 rc |= qed_attn_update_idx(p_hwfn, sb_attn); 1276 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1277 "Attention indices: 0x%08x --> 0x%08x\n", 1278 tmp_index, sb_attn->index); 1279 } 1280 1281 /* Check if we expect interrupts at this time. if not just ack them */ 1282 if (!(rc & QED_SB_EVENT_MASK)) { 1283 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1284 return; 1285 } 1286 1287 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1288 if (!p_hwfn->p_dpc_ptt) { 1289 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); 1290 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1291 return; 1292 } 1293 1294 if (rc & QED_SB_ATT_IDX) 1295 qed_int_attentions(p_hwfn); 1296 1297 if (rc & QED_SB_IDX) { 1298 int pi; 1299 1300 /* Look for a free index */ 1301 for (pi = 0; pi < arr_size; pi++) { 1302 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1303 if (pi_info->comp_cb) 1304 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1305 } 1306 } 1307 1308 if (sb_attn && (rc & QED_SB_ATT_IDX)) 1309 /* This should be done before the interrupts are enabled, 1310 * since otherwise a new attention will be generated. 1311 */ 1312 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1313 1314 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1315 } 1316 1317 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) 1318 { 1319 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1320 1321 if (!p_sb) 1322 return; 1323 1324 if (p_sb->sb_attn) 1325 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1326 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1327 p_sb->sb_attn, p_sb->sb_phys); 1328 kfree(p_sb); 1329 p_hwfn->p_sb_attn = NULL; 1330 } 1331 1332 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, 1333 struct qed_ptt *p_ptt) 1334 { 1335 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1336 1337 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1338 1339 sb_info->index = 0; 1340 sb_info->known_attn = 0; 1341 1342 /* Configure Attention Status Block in IGU */ 1343 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1344 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1345 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1346 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1347 } 1348 1349 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, 1350 struct qed_ptt *p_ptt, 1351 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1352 { 1353 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1354 int i, j, k; 1355 1356 sb_info->sb_attn = sb_virt_addr; 1357 sb_info->sb_phys = sb_phy_addr; 1358 1359 /* Set the pointer to the AEU descriptors */ 1360 sb_info->p_aeu_desc = aeu_descs; 1361 1362 /* Calculate Parity Masks */ 1363 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1364 for (i = 0; i < NUM_ATTN_REGS; i++) { 1365 /* j is array index, k is bit index */ 1366 for (j = 0, k = 0; k < 32; j++) { 1367 struct aeu_invert_reg_bit *p_aeu; 1368 1369 p_aeu = &aeu_descs[i].bits[j]; 1370 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) 1371 sb_info->parity_mask[i] |= 1 << k; 1372 1373 k += ATTENTION_LENGTH(p_aeu->flags); 1374 } 1375 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1376 "Attn Mask [Reg %d]: 0x%08x\n", 1377 i, sb_info->parity_mask[i]); 1378 } 1379 1380 /* Set the address of cleanup for the mcp attention */ 1381 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1382 MISC_REG_AEU_GENERAL_ATTN_0; 1383 1384 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1385 } 1386 1387 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, 1388 struct qed_ptt *p_ptt) 1389 { 1390 struct qed_dev *cdev = p_hwfn->cdev; 1391 struct qed_sb_attn_info *p_sb; 1392 dma_addr_t p_phys = 0; 1393 void *p_virt; 1394 1395 /* SB struct */ 1396 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1397 if (!p_sb) 1398 return -ENOMEM; 1399 1400 /* SB ring */ 1401 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 1402 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1403 &p_phys, GFP_KERNEL); 1404 1405 if (!p_virt) { 1406 kfree(p_sb); 1407 return -ENOMEM; 1408 } 1409 1410 /* Attention setup */ 1411 p_hwfn->p_sb_attn = p_sb; 1412 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1413 1414 return 0; 1415 } 1416 1417 /* coalescing timeout = timeset << (timer_res + 1) */ 1418 #define QED_CAU_DEF_RX_USECS 24 1419 #define QED_CAU_DEF_TX_USECS 48 1420 1421 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, 1422 struct cau_sb_entry *p_sb_entry, 1423 u8 pf_id, u16 vf_number, u8 vf_valid) 1424 { 1425 struct qed_dev *cdev = p_hwfn->cdev; 1426 u32 cau_state; 1427 u8 timer_res; 1428 1429 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 1430 1431 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1432 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1433 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1434 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1435 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1436 1437 cau_state = CAU_HC_DISABLE_STATE; 1438 1439 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1440 cau_state = CAU_HC_ENABLE_STATE; 1441 if (!cdev->rx_coalesce_usecs) 1442 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; 1443 if (!cdev->tx_coalesce_usecs) 1444 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 1445 } 1446 1447 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1448 if (cdev->rx_coalesce_usecs <= 0x7F) 1449 timer_res = 0; 1450 else if (cdev->rx_coalesce_usecs <= 0xFF) 1451 timer_res = 1; 1452 else 1453 timer_res = 2; 1454 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1455 1456 if (cdev->tx_coalesce_usecs <= 0x7F) 1457 timer_res = 0; 1458 else if (cdev->tx_coalesce_usecs <= 0xFF) 1459 timer_res = 1; 1460 else 1461 timer_res = 2; 1462 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1463 1464 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1465 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1466 } 1467 1468 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, 1469 struct qed_ptt *p_ptt, 1470 u16 igu_sb_id, 1471 u32 pi_index, 1472 enum qed_coalescing_fsm coalescing_fsm, 1473 u8 timeset) 1474 { 1475 struct cau_pi_entry pi_entry; 1476 u32 sb_offset, pi_offset; 1477 1478 if (IS_VF(p_hwfn->cdev)) 1479 return; 1480 1481 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1482 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1483 1484 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1485 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 1486 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1487 else 1488 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1489 1490 pi_offset = sb_offset + pi_index; 1491 if (p_hwfn->hw_init_done) { 1492 qed_wr(p_hwfn, p_ptt, 1493 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1494 *((u32 *)&(pi_entry))); 1495 } else { 1496 STORE_RT_REG(p_hwfn, 1497 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1498 *((u32 *)&(pi_entry))); 1499 } 1500 } 1501 1502 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, 1503 struct qed_ptt *p_ptt, 1504 dma_addr_t sb_phys, 1505 u16 igu_sb_id, u16 vf_number, u8 vf_valid) 1506 { 1507 struct cau_sb_entry sb_entry; 1508 1509 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1510 vf_number, vf_valid); 1511 1512 if (p_hwfn->hw_init_done) { 1513 /* Wide-bus, initialize via DMAE */ 1514 u64 phys_addr = (u64)sb_phys; 1515 1516 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, 1517 CAU_REG_SB_ADDR_MEMORY + 1518 igu_sb_id * sizeof(u64), 2, 0); 1519 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, 1520 CAU_REG_SB_VAR_MEMORY + 1521 igu_sb_id * sizeof(u64), 2, 0); 1522 } else { 1523 /* Initialize Status Block Address */ 1524 STORE_RT_REG_AGG(p_hwfn, 1525 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1526 igu_sb_id * 2, 1527 sb_phys); 1528 1529 STORE_RT_REG_AGG(p_hwfn, 1530 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1531 igu_sb_id * 2, 1532 sb_entry); 1533 } 1534 1535 /* Configure pi coalescing if set */ 1536 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1537 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1538 u8 timeset, timer_res; 1539 u8 i; 1540 1541 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1542 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) 1543 timer_res = 0; 1544 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) 1545 timer_res = 1; 1546 else 1547 timer_res = 2; 1548 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); 1549 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1550 QED_COAL_RX_STATE_MACHINE, timeset); 1551 1552 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) 1553 timer_res = 0; 1554 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) 1555 timer_res = 1; 1556 else 1557 timer_res = 2; 1558 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); 1559 for (i = 0; i < num_tc; i++) { 1560 qed_int_cau_conf_pi(p_hwfn, p_ptt, 1561 igu_sb_id, TX_PI(i), 1562 QED_COAL_TX_STATE_MACHINE, 1563 timeset); 1564 } 1565 } 1566 } 1567 1568 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 1569 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) 1570 { 1571 /* zero status block and ack counter */ 1572 sb_info->sb_ack = 0; 1573 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1574 1575 if (IS_PF(p_hwfn->cdev)) 1576 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1577 sb_info->igu_sb_id, 0, 0); 1578 } 1579 1580 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) 1581 { 1582 struct qed_igu_block *p_block; 1583 u16 igu_id; 1584 1585 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1586 igu_id++) { 1587 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1588 1589 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1590 !(p_block->status & QED_IGU_STATUS_FREE)) 1591 continue; 1592 1593 if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) 1594 return p_block; 1595 } 1596 1597 return NULL; 1598 } 1599 1600 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) 1601 { 1602 struct qed_igu_block *p_block; 1603 u16 igu_id; 1604 1605 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1606 igu_id++) { 1607 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1608 1609 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1610 !p_block->is_pf || 1611 p_block->vector_number != vector_id) 1612 continue; 1613 1614 return igu_id; 1615 } 1616 1617 return QED_SB_INVALID_IDX; 1618 } 1619 1620 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 1621 { 1622 u16 igu_sb_id; 1623 1624 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1625 if (sb_id == QED_SP_SB_ID) 1626 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1627 else if (IS_PF(p_hwfn->cdev)) 1628 igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1629 else 1630 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 1631 1632 if (sb_id == QED_SP_SB_ID) 1633 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1634 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1635 else 1636 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1637 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1638 1639 return igu_sb_id; 1640 } 1641 1642 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 1643 struct qed_ptt *p_ptt, 1644 struct qed_sb_info *sb_info, 1645 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) 1646 { 1647 sb_info->sb_virt = sb_virt_addr; 1648 sb_info->sb_phys = sb_phy_addr; 1649 1650 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 1651 1652 if (sb_id != QED_SP_SB_ID) { 1653 if (IS_PF(p_hwfn->cdev)) { 1654 struct qed_igu_info *p_info; 1655 struct qed_igu_block *p_block; 1656 1657 p_info = p_hwfn->hw_info.p_igu_info; 1658 p_block = &p_info->entry[sb_info->igu_sb_id]; 1659 1660 p_block->sb_info = sb_info; 1661 p_block->status &= ~QED_IGU_STATUS_FREE; 1662 p_info->usage.free_cnt--; 1663 } else { 1664 qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1665 } 1666 } 1667 1668 sb_info->cdev = p_hwfn->cdev; 1669 1670 /* The igu address will hold the absolute address that needs to be 1671 * written to for a specific status block 1672 */ 1673 if (IS_PF(p_hwfn->cdev)) { 1674 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1675 GTT_BAR0_MAP_REG_IGU_CMD + 1676 (sb_info->igu_sb_id << 3); 1677 } else { 1678 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1679 PXP_VF_BAR0_START_IGU + 1680 ((IGU_CMD_INT_ACK_BASE + 1681 sb_info->igu_sb_id) << 3); 1682 } 1683 1684 sb_info->flags |= QED_SB_INFO_INIT; 1685 1686 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 1687 1688 return 0; 1689 } 1690 1691 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 1692 struct qed_sb_info *sb_info, u16 sb_id) 1693 { 1694 struct qed_igu_block *p_block; 1695 struct qed_igu_info *p_info; 1696 1697 if (!sb_info) 1698 return 0; 1699 1700 /* zero status block and ack counter */ 1701 sb_info->sb_ack = 0; 1702 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1703 1704 if (IS_VF(p_hwfn->cdev)) { 1705 qed_vf_set_sb_info(p_hwfn, sb_id, NULL); 1706 return 0; 1707 } 1708 1709 p_info = p_hwfn->hw_info.p_igu_info; 1710 p_block = &p_info->entry[sb_info->igu_sb_id]; 1711 1712 /* Vector 0 is reserved to Default SB */ 1713 if (!p_block->vector_number) { 1714 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1715 return -EINVAL; 1716 } 1717 1718 /* Lose reference to client's SB info, and fix counters */ 1719 p_block->sb_info = NULL; 1720 p_block->status |= QED_IGU_STATUS_FREE; 1721 p_info->usage.free_cnt++; 1722 1723 return 0; 1724 } 1725 1726 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 1727 { 1728 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1729 1730 if (!p_sb) 1731 return; 1732 1733 if (p_sb->sb_info.sb_virt) 1734 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1735 SB_ALIGNED_SIZE(p_hwfn), 1736 p_sb->sb_info.sb_virt, 1737 p_sb->sb_info.sb_phys); 1738 kfree(p_sb); 1739 p_hwfn->p_sp_sb = NULL; 1740 } 1741 1742 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1743 { 1744 struct qed_sb_sp_info *p_sb; 1745 dma_addr_t p_phys = 0; 1746 void *p_virt; 1747 1748 /* SB struct */ 1749 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1750 if (!p_sb) 1751 return -ENOMEM; 1752 1753 /* SB ring */ 1754 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1755 SB_ALIGNED_SIZE(p_hwfn), 1756 &p_phys, GFP_KERNEL); 1757 if (!p_virt) { 1758 kfree(p_sb); 1759 return -ENOMEM; 1760 } 1761 1762 /* Status Block setup */ 1763 p_hwfn->p_sp_sb = p_sb; 1764 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 1765 p_phys, QED_SP_SB_ID); 1766 1767 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1768 1769 return 0; 1770 } 1771 1772 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 1773 qed_int_comp_cb_t comp_cb, 1774 void *cookie, u8 *sb_idx, __le16 **p_fw_cons) 1775 { 1776 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1777 int rc = -ENOMEM; 1778 u8 pi; 1779 1780 /* Look for a free index */ 1781 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1782 if (p_sp_sb->pi_info_arr[pi].comp_cb) 1783 continue; 1784 1785 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1786 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1787 *sb_idx = pi; 1788 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1789 rc = 0; 1790 break; 1791 } 1792 1793 return rc; 1794 } 1795 1796 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 1797 { 1798 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1799 1800 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 1801 return -ENOMEM; 1802 1803 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 1804 p_sp_sb->pi_info_arr[pi].cookie = NULL; 1805 1806 return 0; 1807 } 1808 1809 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 1810 { 1811 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1812 } 1813 1814 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 1815 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1816 { 1817 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1818 1819 p_hwfn->cdev->int_mode = int_mode; 1820 switch (p_hwfn->cdev->int_mode) { 1821 case QED_INT_MODE_INTA: 1822 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1823 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1824 break; 1825 1826 case QED_INT_MODE_MSI: 1827 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1828 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1829 break; 1830 1831 case QED_INT_MODE_MSIX: 1832 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1833 break; 1834 case QED_INT_MODE_POLL: 1835 break; 1836 } 1837 1838 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1839 } 1840 1841 static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, 1842 struct qed_ptt *p_ptt) 1843 { 1844 1845 /* Configure AEU signal change to produce attentions */ 1846 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1847 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1848 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1849 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1850 1851 /* Flush the writes to IGU */ 1852 mmiowb(); 1853 1854 /* Unmask AEU signals toward IGU */ 1855 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1856 } 1857 1858 int 1859 qed_int_igu_enable(struct qed_hwfn *p_hwfn, 1860 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1861 { 1862 int rc = 0; 1863 1864 qed_int_igu_enable_attn(p_hwfn, p_ptt); 1865 1866 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1867 rc = qed_slowpath_irq_req(p_hwfn); 1868 if (rc) { 1869 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 1870 return -EINVAL; 1871 } 1872 p_hwfn->b_int_requested = true; 1873 } 1874 /* Enable interrupt Generation */ 1875 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1876 p_hwfn->b_int_enabled = 1; 1877 1878 return rc; 1879 } 1880 1881 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1882 { 1883 p_hwfn->b_int_enabled = 0; 1884 1885 if (IS_VF(p_hwfn->cdev)) 1886 return; 1887 1888 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1889 } 1890 1891 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1892 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 1893 struct qed_ptt *p_ptt, 1894 u16 igu_sb_id, 1895 bool cleanup_set, u16 opaque_fid) 1896 { 1897 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1898 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1899 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1900 1901 /* Set the data field */ 1902 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1903 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 1904 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1905 1906 /* Set the control register */ 1907 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1908 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1909 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1910 1911 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1912 1913 barrier(); 1914 1915 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1916 1917 /* Flush the write to IGU */ 1918 mmiowb(); 1919 1920 /* calculate where to read the status bit from */ 1921 sb_bit = 1 << (igu_sb_id % 32); 1922 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 1923 1924 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 1925 1926 /* Now wait for the command to complete */ 1927 do { 1928 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 1929 1930 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1931 break; 1932 1933 usleep_range(5000, 10000); 1934 } while (--sleep_cnt); 1935 1936 if (!sleep_cnt) 1937 DP_NOTICE(p_hwfn, 1938 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1939 val, igu_sb_id); 1940 } 1941 1942 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 1943 struct qed_ptt *p_ptt, 1944 u16 igu_sb_id, u16 opaque, bool b_set) 1945 { 1946 struct qed_igu_block *p_block; 1947 int pi, i; 1948 1949 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 1950 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1951 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 1952 igu_sb_id, 1953 p_block->function_id, 1954 p_block->is_pf, p_block->vector_number); 1955 1956 /* Set */ 1957 if (b_set) 1958 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 1959 1960 /* Clear */ 1961 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 1962 1963 /* Wait for the IGU SB to cleanup */ 1964 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1965 u32 val; 1966 1967 val = qed_rd(p_hwfn, p_ptt, 1968 IGU_REG_WRITE_DONE_PENDING + 1969 ((igu_sb_id / 32) * 4)); 1970 if (val & BIT((igu_sb_id % 32))) 1971 usleep_range(10, 20); 1972 else 1973 break; 1974 } 1975 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1976 DP_NOTICE(p_hwfn, 1977 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1978 igu_sb_id); 1979 1980 /* Clear the CAU for the SB */ 1981 for (pi = 0; pi < 12; pi++) 1982 qed_wr(p_hwfn, p_ptt, 1983 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 1984 } 1985 1986 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 1987 struct qed_ptt *p_ptt, 1988 bool b_set, bool b_slowpath) 1989 { 1990 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 1991 struct qed_igu_block *p_block; 1992 u16 igu_sb_id = 0; 1993 u32 val = 0; 1994 1995 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 1996 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 1997 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 1998 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 1999 2000 for (igu_sb_id = 0; 2001 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2002 p_block = &p_info->entry[igu_sb_id]; 2003 2004 if (!(p_block->status & QED_IGU_STATUS_VALID) || 2005 !p_block->is_pf || 2006 (p_block->status & QED_IGU_STATUS_DSB)) 2007 continue; 2008 2009 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2010 p_hwfn->hw_info.opaque_fid, 2011 b_set); 2012 } 2013 2014 if (b_slowpath) 2015 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2016 p_info->igu_dsb_id, 2017 p_hwfn->hw_info.opaque_fid, 2018 b_set); 2019 } 2020 2021 int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2022 { 2023 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2024 struct qed_igu_block *p_block; 2025 int pf_sbs, vf_sbs; 2026 u16 igu_sb_id; 2027 u32 val, rval; 2028 2029 if (!RESC_NUM(p_hwfn, QED_SB)) { 2030 p_info->b_allow_pf_vf_change = false; 2031 } else { 2032 /* Use the numbers the MFW have provided - 2033 * don't forget MFW accounts for the default SB as well. 2034 */ 2035 p_info->b_allow_pf_vf_change = true; 2036 2037 if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { 2038 DP_INFO(p_hwfn, 2039 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2040 RESC_NUM(p_hwfn, QED_SB) - 1, 2041 p_info->usage.cnt); 2042 p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; 2043 } 2044 2045 if (IS_PF_SRIOV(p_hwfn)) { 2046 u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; 2047 2048 if (vfs != p_info->usage.iov_cnt) 2049 DP_VERBOSE(p_hwfn, 2050 NETIF_MSG_INTR, 2051 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2052 p_info->usage.iov_cnt, vfs); 2053 2054 /* At this point we know how many SBs we have totally 2055 * in IGU + number of PF SBs. So we can validate that 2056 * we'd have sufficient for VF. 2057 */ 2058 if (vfs > p_info->usage.free_cnt + 2059 p_info->usage.free_cnt_iov - p_info->usage.cnt) { 2060 DP_NOTICE(p_hwfn, 2061 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2062 p_info->usage.free_cnt + 2063 p_info->usage.free_cnt_iov, 2064 p_info->usage.cnt, vfs); 2065 return -EINVAL; 2066 } 2067 2068 /* Currently cap the number of VFs SBs by the 2069 * number of VFs. 2070 */ 2071 p_info->usage.iov_cnt = vfs; 2072 } 2073 } 2074 2075 /* Mark all SBs as free, now in the right PF/VFs division */ 2076 p_info->usage.free_cnt = p_info->usage.cnt; 2077 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2078 p_info->usage.orig = p_info->usage.cnt; 2079 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2080 2081 /* We now proceed to re-configure the IGU cam to reflect the initial 2082 * configuration. We can start with the Default SB. 2083 */ 2084 pf_sbs = p_info->usage.cnt; 2085 vf_sbs = p_info->usage.iov_cnt; 2086 2087 for (igu_sb_id = p_info->igu_dsb_id; 2088 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2089 p_block = &p_info->entry[igu_sb_id]; 2090 val = 0; 2091 2092 if (!(p_block->status & QED_IGU_STATUS_VALID)) 2093 continue; 2094 2095 if (p_block->status & QED_IGU_STATUS_DSB) { 2096 p_block->function_id = p_hwfn->rel_pf_id; 2097 p_block->is_pf = 1; 2098 p_block->vector_number = 0; 2099 p_block->status = QED_IGU_STATUS_VALID | 2100 QED_IGU_STATUS_PF | 2101 QED_IGU_STATUS_DSB; 2102 } else if (pf_sbs) { 2103 pf_sbs--; 2104 p_block->function_id = p_hwfn->rel_pf_id; 2105 p_block->is_pf = 1; 2106 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2107 p_block->status = QED_IGU_STATUS_VALID | 2108 QED_IGU_STATUS_PF | 2109 QED_IGU_STATUS_FREE; 2110 } else if (vf_sbs) { 2111 p_block->function_id = 2112 p_hwfn->cdev->p_iov_info->first_vf_in_pf + 2113 p_info->usage.iov_cnt - vf_sbs; 2114 p_block->is_pf = 0; 2115 p_block->vector_number = 0; 2116 p_block->status = QED_IGU_STATUS_VALID | 2117 QED_IGU_STATUS_FREE; 2118 vf_sbs--; 2119 } else { 2120 p_block->function_id = 0; 2121 p_block->is_pf = 0; 2122 p_block->vector_number = 0; 2123 } 2124 2125 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2126 p_block->function_id); 2127 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2128 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2129 p_block->vector_number); 2130 2131 /* VF entries would be enabled when VF is initializaed */ 2132 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2133 2134 rval = qed_rd(p_hwfn, p_ptt, 2135 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2136 2137 if (rval != val) { 2138 qed_wr(p_hwfn, p_ptt, 2139 IGU_REG_MAPPING_MEMORY + 2140 sizeof(u32) * igu_sb_id, val); 2141 2142 DP_VERBOSE(p_hwfn, 2143 NETIF_MSG_INTR, 2144 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2145 igu_sb_id, 2146 p_block->function_id, 2147 p_block->is_pf, 2148 p_block->vector_number, rval, val); 2149 } 2150 } 2151 2152 return 0; 2153 } 2154 2155 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 2156 struct qed_ptt *p_ptt, u16 igu_sb_id) 2157 { 2158 u32 val = qed_rd(p_hwfn, p_ptt, 2159 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2160 struct qed_igu_block *p_block; 2161 2162 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2163 2164 /* Fill the block information */ 2165 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2166 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2167 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2168 p_block->igu_sb_id = igu_sb_id; 2169 } 2170 2171 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2172 { 2173 struct qed_igu_info *p_igu_info; 2174 struct qed_igu_block *p_block; 2175 u32 min_vf = 0, max_vf = 0; 2176 u16 igu_sb_id; 2177 2178 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 2179 if (!p_hwfn->hw_info.p_igu_info) 2180 return -ENOMEM; 2181 2182 p_igu_info = p_hwfn->hw_info.p_igu_info; 2183 2184 /* Distinguish between existent and non-existent default SB */ 2185 p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; 2186 2187 /* Find the range of VF ids whose SB belong to this PF */ 2188 if (p_hwfn->cdev->p_iov_info) { 2189 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 2190 2191 min_vf = p_iov->first_vf_in_pf; 2192 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2193 } 2194 2195 for (igu_sb_id = 0; 2196 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2197 /* Read current entry; Notice it might not belong to this PF */ 2198 qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2199 p_block = &p_igu_info->entry[igu_sb_id]; 2200 2201 if ((p_block->is_pf) && 2202 (p_block->function_id == p_hwfn->rel_pf_id)) { 2203 p_block->status = QED_IGU_STATUS_PF | 2204 QED_IGU_STATUS_VALID | 2205 QED_IGU_STATUS_FREE; 2206 2207 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2208 p_igu_info->usage.cnt++; 2209 } else if (!(p_block->is_pf) && 2210 (p_block->function_id >= min_vf) && 2211 (p_block->function_id < max_vf)) { 2212 /* Available for VFs of this PF */ 2213 p_block->status = QED_IGU_STATUS_VALID | 2214 QED_IGU_STATUS_FREE; 2215 2216 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2217 p_igu_info->usage.iov_cnt++; 2218 } 2219 2220 /* Mark the First entry belonging to the PF or its VFs 2221 * as the default SB [we'll reset IGU prior to first usage]. 2222 */ 2223 if ((p_block->status & QED_IGU_STATUS_VALID) && 2224 (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { 2225 p_igu_info->igu_dsb_id = igu_sb_id; 2226 p_block->status |= QED_IGU_STATUS_DSB; 2227 } 2228 2229 /* limit number of prints by having each PF print only its 2230 * entries with the exception of PF0 which would print 2231 * everything. 2232 */ 2233 if ((p_block->status & QED_IGU_STATUS_VALID) || 2234 (p_hwfn->abs_pf_id == 0)) { 2235 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2236 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2237 igu_sb_id, p_block->function_id, 2238 p_block->is_pf, p_block->vector_number); 2239 } 2240 } 2241 2242 if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { 2243 DP_NOTICE(p_hwfn, 2244 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2245 p_igu_info->igu_dsb_id); 2246 return -EINVAL; 2247 } 2248 2249 /* All non default SB are considered free at this point */ 2250 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2251 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2252 2253 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2254 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2255 p_igu_info->igu_dsb_id, 2256 p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); 2257 2258 return 0; 2259 } 2260 2261 /** 2262 * @brief Initialize igu runtime registers 2263 * 2264 * @param p_hwfn 2265 */ 2266 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 2267 { 2268 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2269 2270 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2271 } 2272 2273 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 2274 { 2275 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 2276 IGU_CMD_INT_ACK_BASE; 2277 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 2278 IGU_CMD_INT_ACK_BASE; 2279 u32 intr_status_hi = 0, intr_status_lo = 0; 2280 u64 intr_status = 0; 2281 2282 intr_status_lo = REG_RD(p_hwfn, 2283 GTT_BAR0_MAP_REG_IGU_CMD + 2284 lsb_igu_cmd_addr * 8); 2285 intr_status_hi = REG_RD(p_hwfn, 2286 GTT_BAR0_MAP_REG_IGU_CMD + 2287 msb_igu_cmd_addr * 8); 2288 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2289 2290 return intr_status; 2291 } 2292 2293 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 2294 { 2295 tasklet_init(p_hwfn->sp_dpc, 2296 qed_int_sp_dpc, (unsigned long)p_hwfn); 2297 p_hwfn->b_sp_dpc_enabled = true; 2298 } 2299 2300 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 2301 { 2302 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 2303 if (!p_hwfn->sp_dpc) 2304 return -ENOMEM; 2305 2306 return 0; 2307 } 2308 2309 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 2310 { 2311 kfree(p_hwfn->sp_dpc); 2312 p_hwfn->sp_dpc = NULL; 2313 } 2314 2315 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2316 { 2317 int rc = 0; 2318 2319 rc = qed_int_sp_dpc_alloc(p_hwfn); 2320 if (rc) 2321 return rc; 2322 2323 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 2324 if (rc) 2325 return rc; 2326 2327 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 2328 2329 return rc; 2330 } 2331 2332 void qed_int_free(struct qed_hwfn *p_hwfn) 2333 { 2334 qed_int_sp_sb_free(p_hwfn); 2335 qed_int_sb_attn_free(p_hwfn); 2336 qed_int_sp_dpc_free(p_hwfn); 2337 } 2338 2339 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2340 { 2341 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2342 qed_int_sb_attn_setup(p_hwfn, p_ptt); 2343 qed_int_sp_dpc_setup(p_hwfn); 2344 } 2345 2346 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 2347 struct qed_sb_cnt_info *p_sb_cnt_info) 2348 { 2349 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 2350 2351 if (!info || !p_sb_cnt_info) 2352 return; 2353 2354 memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); 2355 } 2356 2357 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 2358 { 2359 int i; 2360 2361 for_each_hwfn(cdev, i) 2362 cdev->hwfns[i].b_int_requested = false; 2363 } 2364 2365 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2366 u8 timer_res, u16 sb_id, bool tx) 2367 { 2368 struct cau_sb_entry sb_entry; 2369 int rc; 2370 2371 if (!p_hwfn->hw_init_done) { 2372 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2373 return -EINVAL; 2374 } 2375 2376 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2377 sb_id * sizeof(u64), 2378 (u64)(uintptr_t)&sb_entry, 2, 0); 2379 if (rc) { 2380 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2381 return rc; 2382 } 2383 2384 if (tx) 2385 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2386 else 2387 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2388 2389 rc = qed_dmae_host2grc(p_hwfn, p_ptt, 2390 (u64)(uintptr_t)&sb_entry, 2391 CAU_REG_SB_VAR_MEMORY + 2392 sb_id * sizeof(u64), 2, 0); 2393 if (rc) { 2394 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2395 return rc; 2396 } 2397 2398 return rc; 2399 } 2400