1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/bitops.h> 37 #include <linux/delay.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/errno.h> 40 #include <linux/interrupt.h> 41 #include <linux/kernel.h> 42 #include <linux/pci.h> 43 #include <linux/slab.h> 44 #include <linux/string.h> 45 #include "qed.h" 46 #include "qed_hsi.h" 47 #include "qed_hw.h" 48 #include "qed_init_ops.h" 49 #include "qed_int.h" 50 #include "qed_mcp.h" 51 #include "qed_reg_addr.h" 52 #include "qed_sp.h" 53 #include "qed_sriov.h" 54 #include "qed_vf.h" 55 56 struct qed_pi_info { 57 qed_int_comp_cb_t comp_cb; 58 void *cookie; 59 }; 60 61 struct qed_sb_sp_info { 62 struct qed_sb_info sb_info; 63 64 /* per protocol index data */ 65 struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; 66 }; 67 68 enum qed_attention_type { 69 QED_ATTN_TYPE_ATTN, 70 QED_ATTN_TYPE_PARITY, 71 }; 72 73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 75 76 struct aeu_invert_reg_bit { 77 char bit_name[30]; 78 79 #define ATTENTION_PARITY (1 << 0) 80 81 #define ATTENTION_LENGTH_MASK (0x00000ff0) 82 #define ATTENTION_LENGTH_SHIFT (4) 83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 84 ATTENTION_LENGTH_SHIFT) 85 #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) 86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 88 ATTENTION_PARITY) 89 90 /* Multiple bits start with this offset */ 91 #define ATTENTION_OFFSET_MASK (0x000ff000) 92 #define ATTENTION_OFFSET_SHIFT (12) 93 94 #define ATTENTION_BB_MASK (0x00700000) 95 #define ATTENTION_BB_SHIFT (20) 96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 97 #define ATTENTION_BB_DIFFERENT BIT(23) 98 99 #define ATTENTION_CLEAR_ENABLE BIT(28) 100 unsigned int flags; 101 102 /* Callback to call if attention will be triggered */ 103 int (*cb)(struct qed_hwfn *p_hwfn); 104 105 enum block_id block_index; 106 }; 107 108 struct aeu_invert_reg { 109 struct aeu_invert_reg_bit bits[32]; 110 }; 111 112 #define MAX_ATTN_GRPS (8) 113 #define NUM_ATTN_REGS (9) 114 115 /* Specific HW attention callbacks */ 116 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 117 { 118 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 119 120 /* This might occur on certain instances; Log it once then mask it */ 121 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 122 tmp); 123 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 124 0xffffffff); 125 126 return 0; 127 } 128 129 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 130 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 131 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 133 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 135 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 137 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 139 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 141 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 142 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 143 { 144 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 145 PSWHST_REG_INCORRECT_ACCESS_VALID); 146 147 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 148 u32 addr, data, length; 149 150 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 151 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 152 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 153 PSWHST_REG_INCORRECT_ACCESS_DATA); 154 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 155 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 156 157 DP_INFO(p_hwfn->cdev, 158 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 159 addr, length, 160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 161 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 162 (u8) GET_FIELD(data, 163 ATTENTION_INCORRECT_ACCESS_VF_VALID), 164 (u8) GET_FIELD(data, 165 ATTENTION_INCORRECT_ACCESS_CLIENT), 166 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 167 (u8) GET_FIELD(data, 168 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 169 data); 170 } 171 172 return 0; 173 } 174 175 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 176 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 177 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 178 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 179 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 180 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 181 #define QED_GRC_ATTENTION_PF_MASK (0xf) 182 #define QED_GRC_ATTENTION_PF_SHIFT (0) 183 #define QED_GRC_ATTENTION_VF_MASK (0xff) 184 #define QED_GRC_ATTENTION_VF_SHIFT (4) 185 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 186 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 187 #define QED_GRC_ATTENTION_PRIV_VF (0) 188 static const char *attn_master_to_str(u8 master) 189 { 190 switch (master) { 191 case 1: return "PXP"; 192 case 2: return "MCP"; 193 case 3: return "MSDM"; 194 case 4: return "PSDM"; 195 case 5: return "YSDM"; 196 case 6: return "USDM"; 197 case 7: return "TSDM"; 198 case 8: return "XSDM"; 199 case 9: return "DBU"; 200 case 10: return "DMAE"; 201 default: 202 return "Unknown"; 203 } 204 } 205 206 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 207 { 208 u32 tmp, tmp2; 209 210 /* We've already cleared the timeout interrupt register, so we learn 211 * of interrupts via the validity register 212 */ 213 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 214 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 215 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 216 goto out; 217 218 /* Read the GRC timeout information */ 219 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 220 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 221 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 222 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 223 224 DP_INFO(p_hwfn->cdev, 225 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 226 tmp2, tmp, 227 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 228 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 229 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 230 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 231 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 232 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)", 233 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 234 235 out: 236 /* Regardles of anything else, clean the validity bit */ 237 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 238 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 239 return 0; 240 } 241 242 #define PGLUE_ATTENTION_VALID (1 << 29) 243 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 244 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 245 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 247 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 248 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 249 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 251 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 252 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 253 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 255 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 256 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 257 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 258 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 259 260 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 261 bool hw_init) 262 { 263 char msg[256]; 264 u32 tmp; 265 266 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 267 if (tmp & PGLUE_ATTENTION_VALID) { 268 u32 addr_lo, addr_hi, details; 269 270 addr_lo = qed_rd(p_hwfn, p_ptt, 271 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 272 addr_hi = qed_rd(p_hwfn, p_ptt, 273 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 274 details = qed_rd(p_hwfn, p_ptt, 275 PGLUE_B_REG_TX_ERR_WR_DETAILS); 276 277 snprintf(msg, sizeof(msg), 278 "Illegal write by chip to [%08x:%08x] blocked.\n" 279 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 280 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", 281 addr_hi, addr_lo, details, 282 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 283 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 284 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), 285 tmp, 286 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), 287 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), 288 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); 289 290 if (hw_init) 291 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); 292 else 293 DP_NOTICE(p_hwfn, "%s\n", msg); 294 } 295 296 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 297 if (tmp & PGLUE_ATTENTION_RD_VALID) { 298 u32 addr_lo, addr_hi, details; 299 300 addr_lo = qed_rd(p_hwfn, p_ptt, 301 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 302 addr_hi = qed_rd(p_hwfn, p_ptt, 303 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 304 details = qed_rd(p_hwfn, p_ptt, 305 PGLUE_B_REG_TX_ERR_RD_DETAILS); 306 307 DP_NOTICE(p_hwfn, 308 "Illegal read by chip from [%08x:%08x] blocked.\n" 309 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 310 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 311 addr_hi, addr_lo, details, 312 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 313 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 314 GET_FIELD(details, 315 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 316 tmp, 317 GET_FIELD(tmp, 318 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 319 GET_FIELD(tmp, 320 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 321 GET_FIELD(tmp, 322 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 323 } 324 325 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 326 if (tmp & PGLUE_ATTENTION_ICPL_VALID) { 327 snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); 328 329 if (hw_init) 330 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); 331 else 332 DP_NOTICE(p_hwfn, "%s\n", msg); 333 } 334 335 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 336 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 337 u32 addr_hi, addr_lo; 338 339 addr_lo = qed_rd(p_hwfn, p_ptt, 340 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 341 addr_hi = qed_rd(p_hwfn, p_ptt, 342 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 343 344 DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", 345 tmp, addr_hi, addr_lo); 346 } 347 348 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 349 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 350 u32 addr_hi, addr_lo, details; 351 352 addr_lo = qed_rd(p_hwfn, p_ptt, 353 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 354 addr_hi = qed_rd(p_hwfn, p_ptt, 355 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 356 details = qed_rd(p_hwfn, p_ptt, 357 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 358 359 DP_NOTICE(p_hwfn, 360 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 361 details, tmp, addr_hi, addr_lo); 362 } 363 364 /* Clear the indications */ 365 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); 366 367 return 0; 368 } 369 370 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) 371 { 372 return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); 373 } 374 375 static int qed_fw_assertion(struct qed_hwfn *p_hwfn) 376 { 377 qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, 378 "FW assertion!\n"); 379 380 return -EINVAL; 381 } 382 383 static int qed_general_attention_35(struct qed_hwfn *p_hwfn) 384 { 385 DP_INFO(p_hwfn, "General attention 35!\n"); 386 387 return 0; 388 } 389 390 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 391 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 392 #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 393 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 394 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 395 396 #define QED_DB_REC_COUNT 1000 397 #define QED_DB_REC_INTERVAL 100 398 399 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, 400 struct qed_ptt *p_ptt) 401 { 402 u32 count = QED_DB_REC_COUNT; 403 u32 usage = 1; 404 405 /* Flush any pending (e)dpms as they may never arrive */ 406 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 407 408 /* wait for usage to zero or count to run out. This is necessary since 409 * EDPM doorbell transactions can take multiple 64b cycles, and as such 410 * can "split" over the pci. Possibly, the doorbell drop can happen with 411 * half an EDPM in the queue and other half dropped. Another EDPM 412 * doorbell to the same address (from doorbell recovery mechanism or 413 * from the doorbelling entity) could have first half dropped and second 414 * half interpreted as continuation of the first. To prevent such 415 * malformed doorbells from reaching the device, flush the queue before 416 * releasing the overflow sticky indication. 417 */ 418 while (count-- && usage) { 419 usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 420 udelay(QED_DB_REC_INTERVAL); 421 } 422 423 /* should have been depleted by now */ 424 if (usage) { 425 DP_NOTICE(p_hwfn->cdev, 426 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 427 QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); 428 return -EBUSY; 429 } 430 431 return 0; 432 } 433 434 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 435 { 436 u32 attn_ovfl, cur_ovfl; 437 int rc; 438 439 attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, 440 &p_hwfn->db_recovery_info.overflow); 441 cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 442 if (!cur_ovfl && !attn_ovfl) 443 return 0; 444 445 DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", 446 attn_ovfl, cur_ovfl); 447 448 if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { 449 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 450 if (rc) 451 return rc; 452 } 453 454 /* Release overflow sticky indication (stop silently dropping everything) */ 455 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 456 457 /* Repeat all last doorbells (doorbell drop recovery) */ 458 qed_db_recovery_execute(p_hwfn); 459 460 return 0; 461 } 462 463 static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) 464 { 465 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 466 u32 overflow; 467 int rc; 468 469 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 470 if (!overflow) 471 goto out; 472 473 /* Run PF doorbell recovery in next periodic handler */ 474 set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); 475 476 if (!p_hwfn->db_bar_no_edpm) { 477 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 478 if (rc) 479 goto out; 480 } 481 482 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 483 out: 484 /* Schedule the handler even if overflow was not detected */ 485 qed_periodic_db_rec_start(p_hwfn); 486 } 487 488 static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) 489 { 490 u32 int_sts, first_drop_reason, details, address, all_drops_reason; 491 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 492 493 /* int_sts may be zero since all PFs were interrupted for doorbell 494 * overflow but another one already handled it. Can abort here. If 495 * This PF also requires overflow recovery we will be interrupted again. 496 * The masked almost full indication may also be set. Ignoring. 497 */ 498 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 499 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 500 return 0; 501 502 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); 503 504 /* check if db_drop or overflow happened */ 505 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 506 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 507 /* Obtain data about db drop/overflow */ 508 first_drop_reason = qed_rd(p_hwfn, p_ptt, 509 DORQ_REG_DB_DROP_REASON) & 510 QED_DORQ_ATTENTION_REASON_MASK; 511 details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); 512 address = qed_rd(p_hwfn, p_ptt, 513 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 514 all_drops_reason = qed_rd(p_hwfn, p_ptt, 515 DORQ_REG_DB_DROP_DETAILS_REASON); 516 517 /* Log info */ 518 DP_NOTICE(p_hwfn->cdev, 519 "Doorbell drop occurred\n" 520 "Address\t\t0x%08x\t(second BAR address)\n" 521 "FID\t\t0x%04x\t\t(Opaque FID)\n" 522 "Size\t\t0x%04x\t\t(in bytes)\n" 523 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 524 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", 525 address, 526 GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), 527 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 528 first_drop_reason, all_drops_reason); 529 530 /* Clear the doorbell drop details and prepare for next drop */ 531 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 532 533 /* Mark interrupt as handled (note: even if drop was due to a different 534 * reason than overflow we mark as handled) 535 */ 536 qed_wr(p_hwfn, 537 p_ptt, 538 DORQ_REG_INT_STS_WR, 539 DORQ_REG_INT_STS_DB_DROP | 540 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 541 542 /* If there are no indications other than drop indications, success */ 543 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 544 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 545 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 546 return 0; 547 } 548 549 /* Some other indication was present - non recoverable */ 550 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 551 552 return -EINVAL; 553 } 554 555 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 556 { 557 p_hwfn->db_recovery_info.dorq_attn = true; 558 qed_dorq_attn_overflow(p_hwfn); 559 560 return qed_dorq_attn_int_sts(p_hwfn); 561 } 562 563 static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) 564 { 565 if (p_hwfn->db_recovery_info.dorq_attn) 566 goto out; 567 568 /* Call DORQ callback if the attention was missed */ 569 qed_dorq_attn_cb(p_hwfn); 570 out: 571 p_hwfn->db_recovery_info.dorq_attn = false; 572 } 573 574 /* Instead of major changes to the data-structure, we have a some 'special' 575 * identifiers for sources that changed meaning between adapters. 576 */ 577 enum aeu_invert_reg_special_type { 578 AEU_INVERT_REG_SPECIAL_CNIG_0, 579 AEU_INVERT_REG_SPECIAL_CNIG_1, 580 AEU_INVERT_REG_SPECIAL_CNIG_2, 581 AEU_INVERT_REG_SPECIAL_CNIG_3, 582 AEU_INVERT_REG_SPECIAL_MAX, 583 }; 584 585 static struct aeu_invert_reg_bit 586 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 587 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 588 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 589 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 590 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 591 }; 592 593 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 594 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 595 { 596 { /* After Invert 1 */ 597 {"GPIO0 function%d", 598 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 599 } 600 }, 601 602 { 603 { /* After Invert 2 */ 604 {"PGLUE config_space", ATTENTION_SINGLE, 605 NULL, MAX_BLOCK_ID}, 606 {"PGLUE misc_flr", ATTENTION_SINGLE, 607 NULL, MAX_BLOCK_ID}, 608 {"PGLUE B RBC", ATTENTION_PAR_INT, 609 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 610 {"PGLUE misc_mctp", ATTENTION_SINGLE, 611 NULL, MAX_BLOCK_ID}, 612 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 613 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 614 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 615 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 616 (1 << ATTENTION_OFFSET_SHIFT), 617 NULL, MAX_BLOCK_ID}, 618 {"PCIE glue/PXP VPD %d", 619 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 620 } 621 }, 622 623 { 624 { /* After Invert 3 */ 625 {"General Attention %d", 626 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 627 } 628 }, 629 630 { 631 { /* After Invert 4 */ 632 {"General Attention 32", ATTENTION_SINGLE | 633 ATTENTION_CLEAR_ENABLE, qed_fw_assertion, 634 MAX_BLOCK_ID}, 635 {"General Attention %d", 636 (2 << ATTENTION_LENGTH_SHIFT) | 637 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 638 {"General Attention 35", ATTENTION_SINGLE | 639 ATTENTION_CLEAR_ENABLE, qed_general_attention_35, 640 MAX_BLOCK_ID}, 641 {"NWS Parity", 642 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 643 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 644 NULL, BLOCK_NWS}, 645 {"NWS Interrupt", 646 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 647 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 648 NULL, BLOCK_NWS}, 649 {"NWM Parity", 650 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 651 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 652 NULL, BLOCK_NWM}, 653 {"NWM Interrupt", 654 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 655 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 656 NULL, BLOCK_NWM}, 657 {"MCP CPU", ATTENTION_SINGLE, 658 qed_mcp_attn_cb, MAX_BLOCK_ID}, 659 {"MCP Watchdog timer", ATTENTION_SINGLE, 660 NULL, MAX_BLOCK_ID}, 661 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 662 {"AVS stop status ready", ATTENTION_SINGLE, 663 NULL, MAX_BLOCK_ID}, 664 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 665 {"MSTAT per-path", ATTENTION_PAR_INT, 666 NULL, MAX_BLOCK_ID}, 667 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 668 NULL, MAX_BLOCK_ID}, 669 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 670 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 671 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 672 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 673 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 674 } 675 }, 676 677 { 678 { /* After Invert 5 */ 679 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 680 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 681 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 682 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 683 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 684 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 685 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 686 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 687 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 688 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 689 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 690 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 691 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 692 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 693 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 694 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 695 } 696 }, 697 698 { 699 { /* After Invert 6 */ 700 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 701 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 702 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 703 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 704 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 705 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 706 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 707 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, 708 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 709 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 710 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 711 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 712 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 713 {"DORQ", ATTENTION_PAR_INT, 714 qed_dorq_attn_cb, BLOCK_DORQ}, 715 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 716 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 717 } 718 }, 719 720 { 721 { /* After Invert 7 */ 722 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 723 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 724 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 725 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 726 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 727 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 728 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 729 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 730 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 731 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 732 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 733 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 734 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 735 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 736 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 737 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 738 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 739 } 740 }, 741 742 { 743 { /* After Invert 8 */ 744 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 745 NULL, BLOCK_PSWRQ2}, 746 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 747 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 748 NULL, BLOCK_PSWWR2}, 749 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 750 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 751 NULL, BLOCK_PSWRD2}, 752 {"PSWHST", ATTENTION_PAR_INT, 753 qed_pswhst_attn_cb, BLOCK_PSWHST}, 754 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 755 NULL, BLOCK_PSWHST2}, 756 {"GRC", ATTENTION_PAR_INT, 757 qed_grc_attn_cb, BLOCK_GRC}, 758 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 759 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 760 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 761 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 762 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 763 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 764 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 765 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 766 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 767 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, 768 NULL, BLOCK_PGLCS}, 769 {"PERST_B assertion", ATTENTION_SINGLE, 770 NULL, MAX_BLOCK_ID}, 771 {"PERST_B deassertion", ATTENTION_SINGLE, 772 NULL, MAX_BLOCK_ID}, 773 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 774 NULL, MAX_BLOCK_ID}, 775 } 776 }, 777 778 { 779 { /* After Invert 9 */ 780 {"MCP Latched memory", ATTENTION_PAR, 781 NULL, MAX_BLOCK_ID}, 782 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 783 NULL, MAX_BLOCK_ID}, 784 {"MCP Latched ump_tx", ATTENTION_PAR, 785 NULL, MAX_BLOCK_ID}, 786 {"MCP Latched scratchpad", ATTENTION_PAR, 787 NULL, MAX_BLOCK_ID}, 788 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 789 NULL, MAX_BLOCK_ID}, 790 } 791 }, 792 }; 793 794 static struct aeu_invert_reg_bit * 795 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 796 struct aeu_invert_reg_bit *p_bit) 797 { 798 if (!QED_IS_BB(p_hwfn->cdev)) 799 return p_bit; 800 801 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 802 return p_bit; 803 804 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 805 ATTENTION_BB_SHIFT]; 806 } 807 808 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 809 struct aeu_invert_reg_bit *p_bit) 810 { 811 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 812 ATTENTION_PARITY); 813 } 814 815 #define ATTN_STATE_BITS (0xfff) 816 #define ATTN_BITS_MASKABLE (0x3ff) 817 struct qed_sb_attn_info { 818 /* Virtual & Physical address of the SB */ 819 struct atten_status_block *sb_attn; 820 dma_addr_t sb_phys; 821 822 /* Last seen running index */ 823 u16 index; 824 825 /* A mask of the AEU bits resulting in a parity error */ 826 u32 parity_mask[NUM_ATTN_REGS]; 827 828 /* A pointer to the attention description structure */ 829 struct aeu_invert_reg *p_aeu_desc; 830 831 /* Previously asserted attentions, which are still unasserted */ 832 u16 known_attn; 833 834 /* Cleanup address for the link's general hw attention */ 835 u32 mfw_attn_addr; 836 }; 837 838 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 839 struct qed_sb_attn_info *p_sb_desc) 840 { 841 u16 rc = 0, index; 842 843 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 844 if (p_sb_desc->index != index) { 845 p_sb_desc->index = index; 846 rc = QED_SB_ATT_IDX; 847 } 848 849 return rc; 850 } 851 852 /** 853 * @brief qed_int_assertion - handles asserted attention bits 854 * 855 * @param p_hwfn 856 * @param asserted_bits newly asserted bits 857 * @return int 858 */ 859 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) 860 { 861 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 862 u32 igu_mask; 863 864 /* Mask the source of the attention in the IGU */ 865 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 866 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 867 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 868 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 869 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 870 871 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 872 "inner known ATTN state: 0x%04x --> 0x%04x\n", 873 sb_attn_sw->known_attn, 874 sb_attn_sw->known_attn | asserted_bits); 875 sb_attn_sw->known_attn |= asserted_bits; 876 877 /* Handle MCP events */ 878 if (asserted_bits & 0x100) { 879 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 880 /* Clean the MCP attention */ 881 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 882 sb_attn_sw->mfw_attn_addr, 0); 883 } 884 885 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 886 GTT_BAR0_MAP_REG_IGU_CMD + 887 ((IGU_CMD_ATTN_BIT_SET_UPPER - 888 IGU_CMD_INT_ACK_BASE) << 3), 889 (u32)asserted_bits); 890 891 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 892 asserted_bits); 893 894 return 0; 895 } 896 897 static void qed_int_attn_print(struct qed_hwfn *p_hwfn, 898 enum block_id id, 899 enum dbg_attn_type type, bool b_clear) 900 { 901 struct dbg_attn_block_result attn_results; 902 enum dbg_status status; 903 904 memset(&attn_results, 0, sizeof(attn_results)); 905 906 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 907 b_clear, &attn_results); 908 if (status != DBG_STATUS_OK) 909 DP_NOTICE(p_hwfn, 910 "Failed to parse attention information [status: %s]\n", 911 qed_dbg_get_status_str(status)); 912 else 913 qed_dbg_parse_attn(p_hwfn, &attn_results); 914 } 915 916 /** 917 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single 918 * cause of the attention 919 * 920 * @param p_hwfn 921 * @param p_aeu - descriptor of an AEU bit which caused the attention 922 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 923 * this bit to this group. 924 * @param bit_index - index of this bit in the aeu_en_reg 925 * 926 * @return int 927 */ 928 static int 929 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, 930 struct aeu_invert_reg_bit *p_aeu, 931 u32 aeu_en_reg, 932 const char *p_bit_name, u32 bitmask) 933 { 934 bool b_fatal = false; 935 int rc = -EINVAL; 936 u32 val; 937 938 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 939 p_bit_name, bitmask); 940 941 /* Call callback before clearing the interrupt status */ 942 if (p_aeu->cb) { 943 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 944 p_bit_name); 945 rc = p_aeu->cb(p_hwfn); 946 } 947 948 if (rc) 949 b_fatal = true; 950 951 /* Print HW block interrupt registers */ 952 if (p_aeu->block_index != MAX_BLOCK_ID) 953 qed_int_attn_print(p_hwfn, p_aeu->block_index, 954 ATTN_TYPE_INTERRUPT, !b_fatal); 955 956 /* Reach assertion if attention is fatal */ 957 if (b_fatal) 958 qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN, 959 "`%s': Fatal attention\n", 960 p_bit_name); 961 else /* If the attention is benign, no need to prevent it */ 962 goto out; 963 964 /* Prevent this Attention from being asserted in the future */ 965 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 966 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 967 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 968 p_bit_name); 969 970 out: 971 return rc; 972 } 973 974 /** 975 * @brief qed_int_deassertion_parity - handle a single parity AEU source 976 * 977 * @param p_hwfn 978 * @param p_aeu - descriptor of an AEU bit which caused the parity 979 * @param aeu_en_reg - address of the AEU enable register 980 * @param bit_index 981 */ 982 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 983 struct aeu_invert_reg_bit *p_aeu, 984 u32 aeu_en_reg, u8 bit_index) 985 { 986 u32 block_id = p_aeu->block_index, mask, val; 987 988 DP_NOTICE(p_hwfn->cdev, 989 "%s parity attention is set [address 0x%08x, bit %d]\n", 990 p_aeu->bit_name, aeu_en_reg, bit_index); 991 992 if (block_id != MAX_BLOCK_ID) { 993 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 994 995 /* In BB, there's a single parity bit for several blocks */ 996 if (block_id == BLOCK_BTB) { 997 qed_int_attn_print(p_hwfn, BLOCK_OPTE, 998 ATTN_TYPE_PARITY, false); 999 qed_int_attn_print(p_hwfn, BLOCK_MCP, 1000 ATTN_TYPE_PARITY, false); 1001 } 1002 } 1003 1004 /* Prevent this parity error from being re-asserted */ 1005 mask = ~BIT(bit_index); 1006 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 1007 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 1008 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 1009 p_aeu->bit_name); 1010 } 1011 1012 /** 1013 * @brief - handles deassertion of previously asserted attentions. 1014 * 1015 * @param p_hwfn 1016 * @param deasserted_bits - newly deasserted bits 1017 * @return int 1018 * 1019 */ 1020 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 1021 u16 deasserted_bits) 1022 { 1023 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1024 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1025 u8 i, j, k, bit_idx; 1026 int rc = 0; 1027 1028 /* Read the attention registers in the AEU */ 1029 for (i = 0; i < NUM_ATTN_REGS; i++) { 1030 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1031 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1032 i * 0x4); 1033 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1034 "Deasserted bits [%d]: %08x\n", 1035 i, aeu_inv_arr[i]); 1036 } 1037 1038 /* Find parity attentions first */ 1039 for (i = 0; i < NUM_ATTN_REGS; i++) { 1040 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1041 u32 parities; 1042 1043 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1044 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1045 1046 /* Skip register in which no parity bit is currently set */ 1047 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1048 if (!parities) 1049 continue; 1050 1051 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1052 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1053 1054 if (qed_int_is_parity_flag(p_hwfn, p_bit) && 1055 !!(parities & BIT(bit_idx))) 1056 qed_int_deassertion_parity(p_hwfn, p_bit, 1057 aeu_en, bit_idx); 1058 1059 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1060 } 1061 } 1062 1063 /* Find non-parity cause for attention and act */ 1064 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1065 struct aeu_invert_reg_bit *p_aeu; 1066 1067 /* Handle only groups whose attention is currently deasserted */ 1068 if (!(deasserted_bits & (1 << k))) 1069 continue; 1070 1071 for (i = 0; i < NUM_ATTN_REGS; i++) { 1072 u32 bits; 1073 1074 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1075 i * sizeof(u32) + 1076 k * sizeof(u32) * NUM_ATTN_REGS; 1077 1078 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1079 bits = aeu_inv_arr[i] & en; 1080 1081 /* Skip if no bit from this group is currently set */ 1082 if (!bits) 1083 continue; 1084 1085 /* Find all set bits from current register which belong 1086 * to current group, making them responsible for the 1087 * previous assertion. 1088 */ 1089 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1090 long unsigned int bitmask; 1091 u8 bit, bit_len; 1092 1093 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1094 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); 1095 1096 bit = bit_idx; 1097 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1098 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { 1099 /* Skip Parity */ 1100 bit++; 1101 bit_len--; 1102 } 1103 1104 bitmask = bits & (((1 << bit_len) - 1) << bit); 1105 bitmask >>= bit; 1106 1107 if (bitmask) { 1108 u32 flags = p_aeu->flags; 1109 char bit_name[30]; 1110 u8 num; 1111 1112 num = (u8)find_first_bit(&bitmask, 1113 bit_len); 1114 1115 /* Some bits represent more than a 1116 * a single interrupt. Correctly print 1117 * their name. 1118 */ 1119 if (ATTENTION_LENGTH(flags) > 2 || 1120 ((flags & ATTENTION_PAR_INT) && 1121 ATTENTION_LENGTH(flags) > 1)) 1122 snprintf(bit_name, 30, 1123 p_aeu->bit_name, num); 1124 else 1125 strlcpy(bit_name, 1126 p_aeu->bit_name, 30); 1127 1128 /* We now need to pass bitmask in its 1129 * correct position. 1130 */ 1131 bitmask <<= bit; 1132 1133 /* Handle source of the attention */ 1134 qed_int_deassertion_aeu_bit(p_hwfn, 1135 p_aeu, 1136 aeu_en, 1137 bit_name, 1138 bitmask); 1139 } 1140 1141 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1142 } 1143 } 1144 } 1145 1146 /* Handle missed DORQ attention */ 1147 qed_dorq_attn_handler(p_hwfn); 1148 1149 /* Clear IGU indication for the deasserted bits */ 1150 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1151 GTT_BAR0_MAP_REG_IGU_CMD + 1152 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1153 IGU_CMD_INT_ACK_BASE) << 3), 1154 ~((u32)deasserted_bits)); 1155 1156 /* Unmask deasserted attentions in IGU */ 1157 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 1158 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1159 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1160 1161 /* Clear deassertion from inner state */ 1162 sb_attn_sw->known_attn &= ~deasserted_bits; 1163 1164 return rc; 1165 } 1166 1167 static int qed_int_attentions(struct qed_hwfn *p_hwfn) 1168 { 1169 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1170 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1171 u32 attn_bits = 0, attn_acks = 0; 1172 u16 asserted_bits, deasserted_bits; 1173 __le16 index; 1174 int rc = 0; 1175 1176 /* Read current attention bits/acks - safeguard against attentions 1177 * by guaranting work on a synchronized timeframe 1178 */ 1179 do { 1180 index = p_sb_attn->sb_index; 1181 /* finish reading index before the loop condition */ 1182 dma_rmb(); 1183 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 1184 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 1185 } while (index != p_sb_attn->sb_index); 1186 p_sb_attn->sb_index = index; 1187 1188 /* Attention / Deassertion are meaningful (and in correct state) 1189 * only when they differ and consistent with known state - deassertion 1190 * when previous attention & current ack, and assertion when current 1191 * attention with no previous attention 1192 */ 1193 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1194 ~p_sb_attn_sw->known_attn; 1195 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1196 p_sb_attn_sw->known_attn; 1197 1198 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { 1199 DP_INFO(p_hwfn, 1200 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1201 index, attn_bits, attn_acks, asserted_bits, 1202 deasserted_bits, p_sb_attn_sw->known_attn); 1203 } else if (asserted_bits == 0x100) { 1204 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1205 "MFW indication via attention\n"); 1206 } else { 1207 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1208 "MFW indication [deassertion]\n"); 1209 } 1210 1211 if (asserted_bits) { 1212 rc = qed_int_assertion(p_hwfn, asserted_bits); 1213 if (rc) 1214 return rc; 1215 } 1216 1217 if (deasserted_bits) 1218 rc = qed_int_deassertion(p_hwfn, deasserted_bits); 1219 1220 return rc; 1221 } 1222 1223 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, 1224 void __iomem *igu_addr, u32 ack_cons) 1225 { 1226 struct igu_prod_cons_update igu_ack = { 0 }; 1227 1228 igu_ack.sb_id_and_flags = 1229 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1230 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1231 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1232 (IGU_SEG_ACCESS_ATTN << 1233 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1234 1235 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); 1236 1237 /* Both segments (interrupts & acks) are written to same place address; 1238 * Need to guarantee all commands will be received (in-order) by HW. 1239 */ 1240 barrier(); 1241 } 1242 1243 void qed_int_sp_dpc(unsigned long hwfn_cookie) 1244 { 1245 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; 1246 struct qed_pi_info *pi_info = NULL; 1247 struct qed_sb_attn_info *sb_attn; 1248 struct qed_sb_info *sb_info; 1249 int arr_size; 1250 u16 rc = 0; 1251 1252 if (!p_hwfn->p_sp_sb) { 1253 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 1254 return; 1255 } 1256 1257 sb_info = &p_hwfn->p_sp_sb->sb_info; 1258 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1259 if (!sb_info) { 1260 DP_ERR(p_hwfn->cdev, 1261 "Status block is NULL - cannot ack interrupts\n"); 1262 return; 1263 } 1264 1265 if (!p_hwfn->p_sb_attn) { 1266 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); 1267 return; 1268 } 1269 sb_attn = p_hwfn->p_sb_attn; 1270 1271 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1272 p_hwfn, p_hwfn->my_id); 1273 1274 /* Disable ack for def status block. Required both for msix + 1275 * inta in non-mask mode, in inta does no harm. 1276 */ 1277 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1278 1279 /* Gather Interrupts/Attentions information */ 1280 if (!sb_info->sb_virt) { 1281 DP_ERR(p_hwfn->cdev, 1282 "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1283 } else { 1284 u32 tmp_index = sb_info->sb_ack; 1285 1286 rc = qed_sb_update_sb_idx(sb_info); 1287 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1288 "Interrupt indices: 0x%08x --> 0x%08x\n", 1289 tmp_index, sb_info->sb_ack); 1290 } 1291 1292 if (!sb_attn || !sb_attn->sb_attn) { 1293 DP_ERR(p_hwfn->cdev, 1294 "Attentions Status block is NULL - cannot check for new attentions!\n"); 1295 } else { 1296 u16 tmp_index = sb_attn->index; 1297 1298 rc |= qed_attn_update_idx(p_hwfn, sb_attn); 1299 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1300 "Attention indices: 0x%08x --> 0x%08x\n", 1301 tmp_index, sb_attn->index); 1302 } 1303 1304 /* Check if we expect interrupts at this time. if not just ack them */ 1305 if (!(rc & QED_SB_EVENT_MASK)) { 1306 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1307 return; 1308 } 1309 1310 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1311 if (!p_hwfn->p_dpc_ptt) { 1312 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); 1313 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1314 return; 1315 } 1316 1317 if (rc & QED_SB_ATT_IDX) 1318 qed_int_attentions(p_hwfn); 1319 1320 if (rc & QED_SB_IDX) { 1321 int pi; 1322 1323 /* Look for a free index */ 1324 for (pi = 0; pi < arr_size; pi++) { 1325 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1326 if (pi_info->comp_cb) 1327 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1328 } 1329 } 1330 1331 if (sb_attn && (rc & QED_SB_ATT_IDX)) 1332 /* This should be done before the interrupts are enabled, 1333 * since otherwise a new attention will be generated. 1334 */ 1335 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1336 1337 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1338 } 1339 1340 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) 1341 { 1342 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1343 1344 if (!p_sb) 1345 return; 1346 1347 if (p_sb->sb_attn) 1348 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1349 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1350 p_sb->sb_attn, p_sb->sb_phys); 1351 kfree(p_sb); 1352 p_hwfn->p_sb_attn = NULL; 1353 } 1354 1355 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, 1356 struct qed_ptt *p_ptt) 1357 { 1358 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1359 1360 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1361 1362 sb_info->index = 0; 1363 sb_info->known_attn = 0; 1364 1365 /* Configure Attention Status Block in IGU */ 1366 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1367 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1368 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1369 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1370 } 1371 1372 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, 1373 struct qed_ptt *p_ptt, 1374 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1375 { 1376 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1377 int i, j, k; 1378 1379 sb_info->sb_attn = sb_virt_addr; 1380 sb_info->sb_phys = sb_phy_addr; 1381 1382 /* Set the pointer to the AEU descriptors */ 1383 sb_info->p_aeu_desc = aeu_descs; 1384 1385 /* Calculate Parity Masks */ 1386 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1387 for (i = 0; i < NUM_ATTN_REGS; i++) { 1388 /* j is array index, k is bit index */ 1389 for (j = 0, k = 0; k < 32; j++) { 1390 struct aeu_invert_reg_bit *p_aeu; 1391 1392 p_aeu = &aeu_descs[i].bits[j]; 1393 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) 1394 sb_info->parity_mask[i] |= 1 << k; 1395 1396 k += ATTENTION_LENGTH(p_aeu->flags); 1397 } 1398 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1399 "Attn Mask [Reg %d]: 0x%08x\n", 1400 i, sb_info->parity_mask[i]); 1401 } 1402 1403 /* Set the address of cleanup for the mcp attention */ 1404 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1405 MISC_REG_AEU_GENERAL_ATTN_0; 1406 1407 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1408 } 1409 1410 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, 1411 struct qed_ptt *p_ptt) 1412 { 1413 struct qed_dev *cdev = p_hwfn->cdev; 1414 struct qed_sb_attn_info *p_sb; 1415 dma_addr_t p_phys = 0; 1416 void *p_virt; 1417 1418 /* SB struct */ 1419 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1420 if (!p_sb) 1421 return -ENOMEM; 1422 1423 /* SB ring */ 1424 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 1425 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1426 &p_phys, GFP_KERNEL); 1427 1428 if (!p_virt) { 1429 kfree(p_sb); 1430 return -ENOMEM; 1431 } 1432 1433 /* Attention setup */ 1434 p_hwfn->p_sb_attn = p_sb; 1435 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1436 1437 return 0; 1438 } 1439 1440 /* coalescing timeout = timeset << (timer_res + 1) */ 1441 #define QED_CAU_DEF_RX_USECS 24 1442 #define QED_CAU_DEF_TX_USECS 48 1443 1444 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, 1445 struct cau_sb_entry *p_sb_entry, 1446 u8 pf_id, u16 vf_number, u8 vf_valid) 1447 { 1448 struct qed_dev *cdev = p_hwfn->cdev; 1449 u32 cau_state; 1450 u8 timer_res; 1451 1452 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 1453 1454 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1455 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1456 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1457 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1458 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1459 1460 cau_state = CAU_HC_DISABLE_STATE; 1461 1462 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1463 cau_state = CAU_HC_ENABLE_STATE; 1464 if (!cdev->rx_coalesce_usecs) 1465 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; 1466 if (!cdev->tx_coalesce_usecs) 1467 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 1468 } 1469 1470 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1471 if (cdev->rx_coalesce_usecs <= 0x7F) 1472 timer_res = 0; 1473 else if (cdev->rx_coalesce_usecs <= 0xFF) 1474 timer_res = 1; 1475 else 1476 timer_res = 2; 1477 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1478 1479 if (cdev->tx_coalesce_usecs <= 0x7F) 1480 timer_res = 0; 1481 else if (cdev->tx_coalesce_usecs <= 0xFF) 1482 timer_res = 1; 1483 else 1484 timer_res = 2; 1485 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1486 1487 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1488 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1489 } 1490 1491 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, 1492 struct qed_ptt *p_ptt, 1493 u16 igu_sb_id, 1494 u32 pi_index, 1495 enum qed_coalescing_fsm coalescing_fsm, 1496 u8 timeset) 1497 { 1498 struct cau_pi_entry pi_entry; 1499 u32 sb_offset, pi_offset; 1500 1501 if (IS_VF(p_hwfn->cdev)) 1502 return; 1503 1504 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1505 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1506 1507 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1508 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 1509 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1510 else 1511 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1512 1513 pi_offset = sb_offset + pi_index; 1514 if (p_hwfn->hw_init_done) { 1515 qed_wr(p_hwfn, p_ptt, 1516 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1517 *((u32 *)&(pi_entry))); 1518 } else { 1519 STORE_RT_REG(p_hwfn, 1520 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1521 *((u32 *)&(pi_entry))); 1522 } 1523 } 1524 1525 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, 1526 struct qed_ptt *p_ptt, 1527 dma_addr_t sb_phys, 1528 u16 igu_sb_id, u16 vf_number, u8 vf_valid) 1529 { 1530 struct cau_sb_entry sb_entry; 1531 1532 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1533 vf_number, vf_valid); 1534 1535 if (p_hwfn->hw_init_done) { 1536 /* Wide-bus, initialize via DMAE */ 1537 u64 phys_addr = (u64)sb_phys; 1538 1539 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, 1540 CAU_REG_SB_ADDR_MEMORY + 1541 igu_sb_id * sizeof(u64), 2, NULL); 1542 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, 1543 CAU_REG_SB_VAR_MEMORY + 1544 igu_sb_id * sizeof(u64), 2, NULL); 1545 } else { 1546 /* Initialize Status Block Address */ 1547 STORE_RT_REG_AGG(p_hwfn, 1548 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1549 igu_sb_id * 2, 1550 sb_phys); 1551 1552 STORE_RT_REG_AGG(p_hwfn, 1553 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1554 igu_sb_id * 2, 1555 sb_entry); 1556 } 1557 1558 /* Configure pi coalescing if set */ 1559 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1560 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1561 u8 timeset, timer_res; 1562 u8 i; 1563 1564 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1565 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) 1566 timer_res = 0; 1567 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) 1568 timer_res = 1; 1569 else 1570 timer_res = 2; 1571 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); 1572 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1573 QED_COAL_RX_STATE_MACHINE, timeset); 1574 1575 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) 1576 timer_res = 0; 1577 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) 1578 timer_res = 1; 1579 else 1580 timer_res = 2; 1581 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); 1582 for (i = 0; i < num_tc; i++) { 1583 qed_int_cau_conf_pi(p_hwfn, p_ptt, 1584 igu_sb_id, TX_PI(i), 1585 QED_COAL_TX_STATE_MACHINE, 1586 timeset); 1587 } 1588 } 1589 } 1590 1591 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 1592 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) 1593 { 1594 /* zero status block and ack counter */ 1595 sb_info->sb_ack = 0; 1596 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1597 1598 if (IS_PF(p_hwfn->cdev)) 1599 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1600 sb_info->igu_sb_id, 0, 0); 1601 } 1602 1603 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) 1604 { 1605 struct qed_igu_block *p_block; 1606 u16 igu_id; 1607 1608 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1609 igu_id++) { 1610 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1611 1612 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1613 !(p_block->status & QED_IGU_STATUS_FREE)) 1614 continue; 1615 1616 if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) 1617 return p_block; 1618 } 1619 1620 return NULL; 1621 } 1622 1623 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) 1624 { 1625 struct qed_igu_block *p_block; 1626 u16 igu_id; 1627 1628 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1629 igu_id++) { 1630 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1631 1632 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1633 !p_block->is_pf || 1634 p_block->vector_number != vector_id) 1635 continue; 1636 1637 return igu_id; 1638 } 1639 1640 return QED_SB_INVALID_IDX; 1641 } 1642 1643 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 1644 { 1645 u16 igu_sb_id; 1646 1647 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1648 if (sb_id == QED_SP_SB_ID) 1649 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1650 else if (IS_PF(p_hwfn->cdev)) 1651 igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1652 else 1653 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 1654 1655 if (sb_id == QED_SP_SB_ID) 1656 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1657 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1658 else 1659 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1660 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1661 1662 return igu_sb_id; 1663 } 1664 1665 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 1666 struct qed_ptt *p_ptt, 1667 struct qed_sb_info *sb_info, 1668 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) 1669 { 1670 sb_info->sb_virt = sb_virt_addr; 1671 sb_info->sb_phys = sb_phy_addr; 1672 1673 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 1674 1675 if (sb_id != QED_SP_SB_ID) { 1676 if (IS_PF(p_hwfn->cdev)) { 1677 struct qed_igu_info *p_info; 1678 struct qed_igu_block *p_block; 1679 1680 p_info = p_hwfn->hw_info.p_igu_info; 1681 p_block = &p_info->entry[sb_info->igu_sb_id]; 1682 1683 p_block->sb_info = sb_info; 1684 p_block->status &= ~QED_IGU_STATUS_FREE; 1685 p_info->usage.free_cnt--; 1686 } else { 1687 qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1688 } 1689 } 1690 1691 sb_info->cdev = p_hwfn->cdev; 1692 1693 /* The igu address will hold the absolute address that needs to be 1694 * written to for a specific status block 1695 */ 1696 if (IS_PF(p_hwfn->cdev)) { 1697 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1698 GTT_BAR0_MAP_REG_IGU_CMD + 1699 (sb_info->igu_sb_id << 3); 1700 } else { 1701 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1702 PXP_VF_BAR0_START_IGU + 1703 ((IGU_CMD_INT_ACK_BASE + 1704 sb_info->igu_sb_id) << 3); 1705 } 1706 1707 sb_info->flags |= QED_SB_INFO_INIT; 1708 1709 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 1710 1711 return 0; 1712 } 1713 1714 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 1715 struct qed_sb_info *sb_info, u16 sb_id) 1716 { 1717 struct qed_igu_block *p_block; 1718 struct qed_igu_info *p_info; 1719 1720 if (!sb_info) 1721 return 0; 1722 1723 /* zero status block and ack counter */ 1724 sb_info->sb_ack = 0; 1725 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1726 1727 if (IS_VF(p_hwfn->cdev)) { 1728 qed_vf_set_sb_info(p_hwfn, sb_id, NULL); 1729 return 0; 1730 } 1731 1732 p_info = p_hwfn->hw_info.p_igu_info; 1733 p_block = &p_info->entry[sb_info->igu_sb_id]; 1734 1735 /* Vector 0 is reserved to Default SB */ 1736 if (!p_block->vector_number) { 1737 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1738 return -EINVAL; 1739 } 1740 1741 /* Lose reference to client's SB info, and fix counters */ 1742 p_block->sb_info = NULL; 1743 p_block->status |= QED_IGU_STATUS_FREE; 1744 p_info->usage.free_cnt++; 1745 1746 return 0; 1747 } 1748 1749 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 1750 { 1751 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1752 1753 if (!p_sb) 1754 return; 1755 1756 if (p_sb->sb_info.sb_virt) 1757 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1758 SB_ALIGNED_SIZE(p_hwfn), 1759 p_sb->sb_info.sb_virt, 1760 p_sb->sb_info.sb_phys); 1761 kfree(p_sb); 1762 p_hwfn->p_sp_sb = NULL; 1763 } 1764 1765 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1766 { 1767 struct qed_sb_sp_info *p_sb; 1768 dma_addr_t p_phys = 0; 1769 void *p_virt; 1770 1771 /* SB struct */ 1772 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1773 if (!p_sb) 1774 return -ENOMEM; 1775 1776 /* SB ring */ 1777 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1778 SB_ALIGNED_SIZE(p_hwfn), 1779 &p_phys, GFP_KERNEL); 1780 if (!p_virt) { 1781 kfree(p_sb); 1782 return -ENOMEM; 1783 } 1784 1785 /* Status Block setup */ 1786 p_hwfn->p_sp_sb = p_sb; 1787 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 1788 p_phys, QED_SP_SB_ID); 1789 1790 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1791 1792 return 0; 1793 } 1794 1795 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 1796 qed_int_comp_cb_t comp_cb, 1797 void *cookie, u8 *sb_idx, __le16 **p_fw_cons) 1798 { 1799 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1800 int rc = -ENOMEM; 1801 u8 pi; 1802 1803 /* Look for a free index */ 1804 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1805 if (p_sp_sb->pi_info_arr[pi].comp_cb) 1806 continue; 1807 1808 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1809 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1810 *sb_idx = pi; 1811 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1812 rc = 0; 1813 break; 1814 } 1815 1816 return rc; 1817 } 1818 1819 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 1820 { 1821 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1822 1823 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 1824 return -ENOMEM; 1825 1826 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 1827 p_sp_sb->pi_info_arr[pi].cookie = NULL; 1828 1829 return 0; 1830 } 1831 1832 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 1833 { 1834 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1835 } 1836 1837 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 1838 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1839 { 1840 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1841 1842 p_hwfn->cdev->int_mode = int_mode; 1843 switch (p_hwfn->cdev->int_mode) { 1844 case QED_INT_MODE_INTA: 1845 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1846 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1847 break; 1848 1849 case QED_INT_MODE_MSI: 1850 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1851 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1852 break; 1853 1854 case QED_INT_MODE_MSIX: 1855 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1856 break; 1857 case QED_INT_MODE_POLL: 1858 break; 1859 } 1860 1861 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1862 } 1863 1864 static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, 1865 struct qed_ptt *p_ptt) 1866 { 1867 1868 /* Configure AEU signal change to produce attentions */ 1869 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1870 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1871 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1872 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1873 1874 /* Unmask AEU signals toward IGU */ 1875 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1876 } 1877 1878 int 1879 qed_int_igu_enable(struct qed_hwfn *p_hwfn, 1880 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1881 { 1882 int rc = 0; 1883 1884 qed_int_igu_enable_attn(p_hwfn, p_ptt); 1885 1886 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1887 rc = qed_slowpath_irq_req(p_hwfn); 1888 if (rc) { 1889 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 1890 return -EINVAL; 1891 } 1892 p_hwfn->b_int_requested = true; 1893 } 1894 /* Enable interrupt Generation */ 1895 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1896 p_hwfn->b_int_enabled = 1; 1897 1898 return rc; 1899 } 1900 1901 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1902 { 1903 p_hwfn->b_int_enabled = 0; 1904 1905 if (IS_VF(p_hwfn->cdev)) 1906 return; 1907 1908 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1909 } 1910 1911 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1912 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 1913 struct qed_ptt *p_ptt, 1914 u16 igu_sb_id, 1915 bool cleanup_set, u16 opaque_fid) 1916 { 1917 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1918 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1919 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1920 1921 /* Set the data field */ 1922 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1923 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 1924 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1925 1926 /* Set the control register */ 1927 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1928 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1929 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1930 1931 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1932 1933 barrier(); 1934 1935 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1936 1937 /* calculate where to read the status bit from */ 1938 sb_bit = 1 << (igu_sb_id % 32); 1939 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 1940 1941 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 1942 1943 /* Now wait for the command to complete */ 1944 do { 1945 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 1946 1947 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1948 break; 1949 1950 usleep_range(5000, 10000); 1951 } while (--sleep_cnt); 1952 1953 if (!sleep_cnt) 1954 DP_NOTICE(p_hwfn, 1955 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1956 val, igu_sb_id); 1957 } 1958 1959 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 1960 struct qed_ptt *p_ptt, 1961 u16 igu_sb_id, u16 opaque, bool b_set) 1962 { 1963 struct qed_igu_block *p_block; 1964 int pi, i; 1965 1966 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 1967 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1968 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 1969 igu_sb_id, 1970 p_block->function_id, 1971 p_block->is_pf, p_block->vector_number); 1972 1973 /* Set */ 1974 if (b_set) 1975 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 1976 1977 /* Clear */ 1978 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 1979 1980 /* Wait for the IGU SB to cleanup */ 1981 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1982 u32 val; 1983 1984 val = qed_rd(p_hwfn, p_ptt, 1985 IGU_REG_WRITE_DONE_PENDING + 1986 ((igu_sb_id / 32) * 4)); 1987 if (val & BIT((igu_sb_id % 32))) 1988 usleep_range(10, 20); 1989 else 1990 break; 1991 } 1992 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1993 DP_NOTICE(p_hwfn, 1994 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1995 igu_sb_id); 1996 1997 /* Clear the CAU for the SB */ 1998 for (pi = 0; pi < 12; pi++) 1999 qed_wr(p_hwfn, p_ptt, 2000 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2001 } 2002 2003 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 2004 struct qed_ptt *p_ptt, 2005 bool b_set, bool b_slowpath) 2006 { 2007 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2008 struct qed_igu_block *p_block; 2009 u16 igu_sb_id = 0; 2010 u32 val = 0; 2011 2012 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2013 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2014 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2015 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2016 2017 for (igu_sb_id = 0; 2018 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2019 p_block = &p_info->entry[igu_sb_id]; 2020 2021 if (!(p_block->status & QED_IGU_STATUS_VALID) || 2022 !p_block->is_pf || 2023 (p_block->status & QED_IGU_STATUS_DSB)) 2024 continue; 2025 2026 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2027 p_hwfn->hw_info.opaque_fid, 2028 b_set); 2029 } 2030 2031 if (b_slowpath) 2032 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2033 p_info->igu_dsb_id, 2034 p_hwfn->hw_info.opaque_fid, 2035 b_set); 2036 } 2037 2038 int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2039 { 2040 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2041 struct qed_igu_block *p_block; 2042 int pf_sbs, vf_sbs; 2043 u16 igu_sb_id; 2044 u32 val, rval; 2045 2046 if (!RESC_NUM(p_hwfn, QED_SB)) { 2047 p_info->b_allow_pf_vf_change = false; 2048 } else { 2049 /* Use the numbers the MFW have provided - 2050 * don't forget MFW accounts for the default SB as well. 2051 */ 2052 p_info->b_allow_pf_vf_change = true; 2053 2054 if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { 2055 DP_INFO(p_hwfn, 2056 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2057 RESC_NUM(p_hwfn, QED_SB) - 1, 2058 p_info->usage.cnt); 2059 p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; 2060 } 2061 2062 if (IS_PF_SRIOV(p_hwfn)) { 2063 u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; 2064 2065 if (vfs != p_info->usage.iov_cnt) 2066 DP_VERBOSE(p_hwfn, 2067 NETIF_MSG_INTR, 2068 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2069 p_info->usage.iov_cnt, vfs); 2070 2071 /* At this point we know how many SBs we have totally 2072 * in IGU + number of PF SBs. So we can validate that 2073 * we'd have sufficient for VF. 2074 */ 2075 if (vfs > p_info->usage.free_cnt + 2076 p_info->usage.free_cnt_iov - p_info->usage.cnt) { 2077 DP_NOTICE(p_hwfn, 2078 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2079 p_info->usage.free_cnt + 2080 p_info->usage.free_cnt_iov, 2081 p_info->usage.cnt, vfs); 2082 return -EINVAL; 2083 } 2084 2085 /* Currently cap the number of VFs SBs by the 2086 * number of VFs. 2087 */ 2088 p_info->usage.iov_cnt = vfs; 2089 } 2090 } 2091 2092 /* Mark all SBs as free, now in the right PF/VFs division */ 2093 p_info->usage.free_cnt = p_info->usage.cnt; 2094 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2095 p_info->usage.orig = p_info->usage.cnt; 2096 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2097 2098 /* We now proceed to re-configure the IGU cam to reflect the initial 2099 * configuration. We can start with the Default SB. 2100 */ 2101 pf_sbs = p_info->usage.cnt; 2102 vf_sbs = p_info->usage.iov_cnt; 2103 2104 for (igu_sb_id = p_info->igu_dsb_id; 2105 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2106 p_block = &p_info->entry[igu_sb_id]; 2107 val = 0; 2108 2109 if (!(p_block->status & QED_IGU_STATUS_VALID)) 2110 continue; 2111 2112 if (p_block->status & QED_IGU_STATUS_DSB) { 2113 p_block->function_id = p_hwfn->rel_pf_id; 2114 p_block->is_pf = 1; 2115 p_block->vector_number = 0; 2116 p_block->status = QED_IGU_STATUS_VALID | 2117 QED_IGU_STATUS_PF | 2118 QED_IGU_STATUS_DSB; 2119 } else if (pf_sbs) { 2120 pf_sbs--; 2121 p_block->function_id = p_hwfn->rel_pf_id; 2122 p_block->is_pf = 1; 2123 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2124 p_block->status = QED_IGU_STATUS_VALID | 2125 QED_IGU_STATUS_PF | 2126 QED_IGU_STATUS_FREE; 2127 } else if (vf_sbs) { 2128 p_block->function_id = 2129 p_hwfn->cdev->p_iov_info->first_vf_in_pf + 2130 p_info->usage.iov_cnt - vf_sbs; 2131 p_block->is_pf = 0; 2132 p_block->vector_number = 0; 2133 p_block->status = QED_IGU_STATUS_VALID | 2134 QED_IGU_STATUS_FREE; 2135 vf_sbs--; 2136 } else { 2137 p_block->function_id = 0; 2138 p_block->is_pf = 0; 2139 p_block->vector_number = 0; 2140 } 2141 2142 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2143 p_block->function_id); 2144 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2145 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2146 p_block->vector_number); 2147 2148 /* VF entries would be enabled when VF is initializaed */ 2149 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2150 2151 rval = qed_rd(p_hwfn, p_ptt, 2152 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2153 2154 if (rval != val) { 2155 qed_wr(p_hwfn, p_ptt, 2156 IGU_REG_MAPPING_MEMORY + 2157 sizeof(u32) * igu_sb_id, val); 2158 2159 DP_VERBOSE(p_hwfn, 2160 NETIF_MSG_INTR, 2161 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2162 igu_sb_id, 2163 p_block->function_id, 2164 p_block->is_pf, 2165 p_block->vector_number, rval, val); 2166 } 2167 } 2168 2169 return 0; 2170 } 2171 2172 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 2173 struct qed_ptt *p_ptt, u16 igu_sb_id) 2174 { 2175 u32 val = qed_rd(p_hwfn, p_ptt, 2176 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2177 struct qed_igu_block *p_block; 2178 2179 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2180 2181 /* Fill the block information */ 2182 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2183 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2184 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2185 p_block->igu_sb_id = igu_sb_id; 2186 } 2187 2188 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2189 { 2190 struct qed_igu_info *p_igu_info; 2191 struct qed_igu_block *p_block; 2192 u32 min_vf = 0, max_vf = 0; 2193 u16 igu_sb_id; 2194 2195 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 2196 if (!p_hwfn->hw_info.p_igu_info) 2197 return -ENOMEM; 2198 2199 p_igu_info = p_hwfn->hw_info.p_igu_info; 2200 2201 /* Distinguish between existent and non-existent default SB */ 2202 p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; 2203 2204 /* Find the range of VF ids whose SB belong to this PF */ 2205 if (p_hwfn->cdev->p_iov_info) { 2206 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 2207 2208 min_vf = p_iov->first_vf_in_pf; 2209 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2210 } 2211 2212 for (igu_sb_id = 0; 2213 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2214 /* Read current entry; Notice it might not belong to this PF */ 2215 qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2216 p_block = &p_igu_info->entry[igu_sb_id]; 2217 2218 if ((p_block->is_pf) && 2219 (p_block->function_id == p_hwfn->rel_pf_id)) { 2220 p_block->status = QED_IGU_STATUS_PF | 2221 QED_IGU_STATUS_VALID | 2222 QED_IGU_STATUS_FREE; 2223 2224 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2225 p_igu_info->usage.cnt++; 2226 } else if (!(p_block->is_pf) && 2227 (p_block->function_id >= min_vf) && 2228 (p_block->function_id < max_vf)) { 2229 /* Available for VFs of this PF */ 2230 p_block->status = QED_IGU_STATUS_VALID | 2231 QED_IGU_STATUS_FREE; 2232 2233 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2234 p_igu_info->usage.iov_cnt++; 2235 } 2236 2237 /* Mark the First entry belonging to the PF or its VFs 2238 * as the default SB [we'll reset IGU prior to first usage]. 2239 */ 2240 if ((p_block->status & QED_IGU_STATUS_VALID) && 2241 (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { 2242 p_igu_info->igu_dsb_id = igu_sb_id; 2243 p_block->status |= QED_IGU_STATUS_DSB; 2244 } 2245 2246 /* limit number of prints by having each PF print only its 2247 * entries with the exception of PF0 which would print 2248 * everything. 2249 */ 2250 if ((p_block->status & QED_IGU_STATUS_VALID) || 2251 (p_hwfn->abs_pf_id == 0)) { 2252 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2253 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2254 igu_sb_id, p_block->function_id, 2255 p_block->is_pf, p_block->vector_number); 2256 } 2257 } 2258 2259 if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { 2260 DP_NOTICE(p_hwfn, 2261 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2262 p_igu_info->igu_dsb_id); 2263 return -EINVAL; 2264 } 2265 2266 /* All non default SB are considered free at this point */ 2267 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2268 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2269 2270 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2271 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2272 p_igu_info->igu_dsb_id, 2273 p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); 2274 2275 return 0; 2276 } 2277 2278 /** 2279 * @brief Initialize igu runtime registers 2280 * 2281 * @param p_hwfn 2282 */ 2283 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 2284 { 2285 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2286 2287 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2288 } 2289 2290 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 2291 { 2292 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 2293 IGU_CMD_INT_ACK_BASE; 2294 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 2295 IGU_CMD_INT_ACK_BASE; 2296 u32 intr_status_hi = 0, intr_status_lo = 0; 2297 u64 intr_status = 0; 2298 2299 intr_status_lo = REG_RD(p_hwfn, 2300 GTT_BAR0_MAP_REG_IGU_CMD + 2301 lsb_igu_cmd_addr * 8); 2302 intr_status_hi = REG_RD(p_hwfn, 2303 GTT_BAR0_MAP_REG_IGU_CMD + 2304 msb_igu_cmd_addr * 8); 2305 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2306 2307 return intr_status; 2308 } 2309 2310 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 2311 { 2312 tasklet_init(p_hwfn->sp_dpc, 2313 qed_int_sp_dpc, (unsigned long)p_hwfn); 2314 p_hwfn->b_sp_dpc_enabled = true; 2315 } 2316 2317 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 2318 { 2319 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 2320 if (!p_hwfn->sp_dpc) 2321 return -ENOMEM; 2322 2323 return 0; 2324 } 2325 2326 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 2327 { 2328 kfree(p_hwfn->sp_dpc); 2329 p_hwfn->sp_dpc = NULL; 2330 } 2331 2332 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2333 { 2334 int rc = 0; 2335 2336 rc = qed_int_sp_dpc_alloc(p_hwfn); 2337 if (rc) 2338 return rc; 2339 2340 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 2341 if (rc) 2342 return rc; 2343 2344 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 2345 2346 return rc; 2347 } 2348 2349 void qed_int_free(struct qed_hwfn *p_hwfn) 2350 { 2351 qed_int_sp_sb_free(p_hwfn); 2352 qed_int_sb_attn_free(p_hwfn); 2353 qed_int_sp_dpc_free(p_hwfn); 2354 } 2355 2356 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2357 { 2358 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2359 qed_int_sb_attn_setup(p_hwfn, p_ptt); 2360 qed_int_sp_dpc_setup(p_hwfn); 2361 } 2362 2363 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 2364 struct qed_sb_cnt_info *p_sb_cnt_info) 2365 { 2366 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 2367 2368 if (!info || !p_sb_cnt_info) 2369 return; 2370 2371 memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); 2372 } 2373 2374 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 2375 { 2376 int i; 2377 2378 for_each_hwfn(cdev, i) 2379 cdev->hwfns[i].b_int_requested = false; 2380 } 2381 2382 void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable) 2383 { 2384 cdev->attn_clr_en = clr_enable; 2385 } 2386 2387 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2388 u8 timer_res, u16 sb_id, bool tx) 2389 { 2390 struct cau_sb_entry sb_entry; 2391 int rc; 2392 2393 if (!p_hwfn->hw_init_done) { 2394 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2395 return -EINVAL; 2396 } 2397 2398 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2399 sb_id * sizeof(u64), 2400 (u64)(uintptr_t)&sb_entry, 2, NULL); 2401 if (rc) { 2402 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2403 return rc; 2404 } 2405 2406 if (tx) 2407 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2408 else 2409 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2410 2411 rc = qed_dmae_host2grc(p_hwfn, p_ptt, 2412 (u64)(uintptr_t)&sb_entry, 2413 CAU_REG_SB_VAR_MEMORY + 2414 sb_id * sizeof(u64), 2, NULL); 2415 if (rc) { 2416 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2417 return rc; 2418 } 2419 2420 return rc; 2421 } 2422