1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/bitops.h> 37 #include <linux/delay.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/errno.h> 40 #include <linux/interrupt.h> 41 #include <linux/kernel.h> 42 #include <linux/pci.h> 43 #include <linux/slab.h> 44 #include <linux/string.h> 45 #include "qed.h" 46 #include "qed_hsi.h" 47 #include "qed_hw.h" 48 #include "qed_init_ops.h" 49 #include "qed_int.h" 50 #include "qed_mcp.h" 51 #include "qed_reg_addr.h" 52 #include "qed_sp.h" 53 #include "qed_sriov.h" 54 #include "qed_vf.h" 55 56 struct qed_pi_info { 57 qed_int_comp_cb_t comp_cb; 58 void *cookie; 59 }; 60 61 struct qed_sb_sp_info { 62 struct qed_sb_info sb_info; 63 64 /* per protocol index data */ 65 struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; 66 }; 67 68 enum qed_attention_type { 69 QED_ATTN_TYPE_ATTN, 70 QED_ATTN_TYPE_PARITY, 71 }; 72 73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 75 76 struct aeu_invert_reg_bit { 77 char bit_name[30]; 78 79 #define ATTENTION_PARITY (1 << 0) 80 81 #define ATTENTION_LENGTH_MASK (0x00000ff0) 82 #define ATTENTION_LENGTH_SHIFT (4) 83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 84 ATTENTION_LENGTH_SHIFT) 85 #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) 86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 88 ATTENTION_PARITY) 89 90 /* Multiple bits start with this offset */ 91 #define ATTENTION_OFFSET_MASK (0x000ff000) 92 #define ATTENTION_OFFSET_SHIFT (12) 93 94 #define ATTENTION_BB_MASK (0x00700000) 95 #define ATTENTION_BB_SHIFT (20) 96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 97 #define ATTENTION_BB_DIFFERENT BIT(23) 98 99 #define ATTENTION_CLEAR_ENABLE BIT(28) 100 unsigned int flags; 101 102 /* Callback to call if attention will be triggered */ 103 int (*cb)(struct qed_hwfn *p_hwfn); 104 105 enum block_id block_index; 106 }; 107 108 struct aeu_invert_reg { 109 struct aeu_invert_reg_bit bits[32]; 110 }; 111 112 #define MAX_ATTN_GRPS (8) 113 #define NUM_ATTN_REGS (9) 114 115 /* Specific HW attention callbacks */ 116 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 117 { 118 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 119 120 /* This might occur on certain instances; Log it once then mask it */ 121 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 122 tmp); 123 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 124 0xffffffff); 125 126 return 0; 127 } 128 129 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 130 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 131 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 133 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 135 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 137 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 139 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 141 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 142 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 143 { 144 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 145 PSWHST_REG_INCORRECT_ACCESS_VALID); 146 147 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 148 u32 addr, data, length; 149 150 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 151 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 152 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 153 PSWHST_REG_INCORRECT_ACCESS_DATA); 154 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 155 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 156 157 DP_INFO(p_hwfn->cdev, 158 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 159 addr, length, 160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 161 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 162 (u8) GET_FIELD(data, 163 ATTENTION_INCORRECT_ACCESS_VF_VALID), 164 (u8) GET_FIELD(data, 165 ATTENTION_INCORRECT_ACCESS_CLIENT), 166 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 167 (u8) GET_FIELD(data, 168 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 169 data); 170 } 171 172 return 0; 173 } 174 175 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 176 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 177 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 178 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 179 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 180 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 181 #define QED_GRC_ATTENTION_PF_MASK (0xf) 182 #define QED_GRC_ATTENTION_PF_SHIFT (0) 183 #define QED_GRC_ATTENTION_VF_MASK (0xff) 184 #define QED_GRC_ATTENTION_VF_SHIFT (4) 185 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 186 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 187 #define QED_GRC_ATTENTION_PRIV_VF (0) 188 static const char *attn_master_to_str(u8 master) 189 { 190 switch (master) { 191 case 1: return "PXP"; 192 case 2: return "MCP"; 193 case 3: return "MSDM"; 194 case 4: return "PSDM"; 195 case 5: return "YSDM"; 196 case 6: return "USDM"; 197 case 7: return "TSDM"; 198 case 8: return "XSDM"; 199 case 9: return "DBU"; 200 case 10: return "DMAE"; 201 default: 202 return "Unknown"; 203 } 204 } 205 206 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 207 { 208 u32 tmp, tmp2; 209 210 /* We've already cleared the timeout interrupt register, so we learn 211 * of interrupts via the validity register 212 */ 213 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 214 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 215 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 216 goto out; 217 218 /* Read the GRC timeout information */ 219 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 220 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 221 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 222 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 223 224 DP_INFO(p_hwfn->cdev, 225 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 226 tmp2, tmp, 227 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 228 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 229 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 230 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 231 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 232 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)", 233 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 234 235 out: 236 /* Regardles of anything else, clean the validity bit */ 237 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 238 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 239 return 0; 240 } 241 242 #define PGLUE_ATTENTION_VALID (1 << 29) 243 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 244 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 245 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 247 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 248 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 249 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 251 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 252 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 253 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 255 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 256 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 257 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 258 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 259 260 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 261 struct qed_ptt *p_ptt) 262 { 263 u32 tmp; 264 265 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 266 if (tmp & PGLUE_ATTENTION_VALID) { 267 u32 addr_lo, addr_hi, details; 268 269 addr_lo = qed_rd(p_hwfn, p_ptt, 270 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 271 addr_hi = qed_rd(p_hwfn, p_ptt, 272 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 273 details = qed_rd(p_hwfn, p_ptt, 274 PGLUE_B_REG_TX_ERR_WR_DETAILS); 275 276 DP_NOTICE(p_hwfn, 277 "Illegal write by chip to [%08x:%08x] blocked.\n" 278 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 279 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 280 addr_hi, addr_lo, details, 281 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 282 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 283 GET_FIELD(details, 284 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 285 tmp, 286 GET_FIELD(tmp, 287 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 288 GET_FIELD(tmp, 289 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 290 GET_FIELD(tmp, 291 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 292 } 293 294 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 295 if (tmp & PGLUE_ATTENTION_RD_VALID) { 296 u32 addr_lo, addr_hi, details; 297 298 addr_lo = qed_rd(p_hwfn, p_ptt, 299 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 300 addr_hi = qed_rd(p_hwfn, p_ptt, 301 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 302 details = qed_rd(p_hwfn, p_ptt, 303 PGLUE_B_REG_TX_ERR_RD_DETAILS); 304 305 DP_NOTICE(p_hwfn, 306 "Illegal read by chip from [%08x:%08x] blocked.\n" 307 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 308 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 309 addr_hi, addr_lo, details, 310 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 311 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 312 GET_FIELD(details, 313 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 314 tmp, 315 GET_FIELD(tmp, 316 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 317 GET_FIELD(tmp, 318 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 319 GET_FIELD(tmp, 320 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 321 } 322 323 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 324 if (tmp & PGLUE_ATTENTION_ICPL_VALID) 325 DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); 326 327 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 328 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 329 u32 addr_hi, addr_lo; 330 331 addr_lo = qed_rd(p_hwfn, p_ptt, 332 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 333 addr_hi = qed_rd(p_hwfn, p_ptt, 334 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 335 336 DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", 337 tmp, addr_hi, addr_lo); 338 } 339 340 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 341 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 342 u32 addr_hi, addr_lo, details; 343 344 addr_lo = qed_rd(p_hwfn, p_ptt, 345 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 346 addr_hi = qed_rd(p_hwfn, p_ptt, 347 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 348 details = qed_rd(p_hwfn, p_ptt, 349 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 350 351 DP_NOTICE(p_hwfn, 352 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 353 details, tmp, addr_hi, addr_lo); 354 } 355 356 /* Clear the indications */ 357 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); 358 359 return 0; 360 } 361 362 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) 363 { 364 return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 365 } 366 367 static int qed_fw_assertion(struct qed_hwfn *p_hwfn) 368 { 369 qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, 370 "FW assertion!\n"); 371 372 return -EINVAL; 373 } 374 375 static int qed_general_attention_35(struct qed_hwfn *p_hwfn) 376 { 377 DP_INFO(p_hwfn, "General attention 35!\n"); 378 379 return 0; 380 } 381 382 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 383 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 384 #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 385 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 386 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 387 388 #define QED_DB_REC_COUNT 1000 389 #define QED_DB_REC_INTERVAL 100 390 391 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, 392 struct qed_ptt *p_ptt) 393 { 394 u32 count = QED_DB_REC_COUNT; 395 u32 usage = 1; 396 397 /* Flush any pending (e)dpms as they may never arrive */ 398 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 399 400 /* wait for usage to zero or count to run out. This is necessary since 401 * EDPM doorbell transactions can take multiple 64b cycles, and as such 402 * can "split" over the pci. Possibly, the doorbell drop can happen with 403 * half an EDPM in the queue and other half dropped. Another EDPM 404 * doorbell to the same address (from doorbell recovery mechanism or 405 * from the doorbelling entity) could have first half dropped and second 406 * half interpreted as continuation of the first. To prevent such 407 * malformed doorbells from reaching the device, flush the queue before 408 * releasing the overflow sticky indication. 409 */ 410 while (count-- && usage) { 411 usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 412 udelay(QED_DB_REC_INTERVAL); 413 } 414 415 /* should have been depleted by now */ 416 if (usage) { 417 DP_NOTICE(p_hwfn->cdev, 418 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 419 QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); 420 return -EBUSY; 421 } 422 423 return 0; 424 } 425 426 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 427 { 428 u32 attn_ovfl, cur_ovfl; 429 int rc; 430 431 attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, 432 &p_hwfn->db_recovery_info.overflow); 433 cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 434 if (!cur_ovfl && !attn_ovfl) 435 return 0; 436 437 DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", 438 attn_ovfl, cur_ovfl); 439 440 if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { 441 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 442 if (rc) 443 return rc; 444 } 445 446 /* Release overflow sticky indication (stop silently dropping everything) */ 447 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 448 449 /* Repeat all last doorbells (doorbell drop recovery) */ 450 qed_db_recovery_execute(p_hwfn); 451 452 return 0; 453 } 454 455 static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) 456 { 457 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 458 u32 overflow; 459 int rc; 460 461 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 462 if (!overflow) 463 goto out; 464 465 /* Run PF doorbell recovery in next periodic handler */ 466 set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); 467 468 if (!p_hwfn->db_bar_no_edpm) { 469 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 470 if (rc) 471 goto out; 472 } 473 474 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 475 out: 476 /* Schedule the handler even if overflow was not detected */ 477 qed_periodic_db_rec_start(p_hwfn); 478 } 479 480 static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) 481 { 482 u32 int_sts, first_drop_reason, details, address, all_drops_reason; 483 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 484 485 /* int_sts may be zero since all PFs were interrupted for doorbell 486 * overflow but another one already handled it. Can abort here. If 487 * This PF also requires overflow recovery we will be interrupted again. 488 * The masked almost full indication may also be set. Ignoring. 489 */ 490 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 491 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 492 return 0; 493 494 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); 495 496 /* check if db_drop or overflow happened */ 497 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 498 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 499 /* Obtain data about db drop/overflow */ 500 first_drop_reason = qed_rd(p_hwfn, p_ptt, 501 DORQ_REG_DB_DROP_REASON) & 502 QED_DORQ_ATTENTION_REASON_MASK; 503 details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); 504 address = qed_rd(p_hwfn, p_ptt, 505 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 506 all_drops_reason = qed_rd(p_hwfn, p_ptt, 507 DORQ_REG_DB_DROP_DETAILS_REASON); 508 509 /* Log info */ 510 DP_NOTICE(p_hwfn->cdev, 511 "Doorbell drop occurred\n" 512 "Address\t\t0x%08x\t(second BAR address)\n" 513 "FID\t\t0x%04x\t\t(Opaque FID)\n" 514 "Size\t\t0x%04x\t\t(in bytes)\n" 515 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 516 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", 517 address, 518 GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), 519 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 520 first_drop_reason, all_drops_reason); 521 522 /* Clear the doorbell drop details and prepare for next drop */ 523 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 524 525 /* Mark interrupt as handled (note: even if drop was due to a different 526 * reason than overflow we mark as handled) 527 */ 528 qed_wr(p_hwfn, 529 p_ptt, 530 DORQ_REG_INT_STS_WR, 531 DORQ_REG_INT_STS_DB_DROP | 532 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 533 534 /* If there are no indications other than drop indications, success */ 535 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 536 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 537 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 538 return 0; 539 } 540 541 /* Some other indication was present - non recoverable */ 542 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 543 544 return -EINVAL; 545 } 546 547 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 548 { 549 p_hwfn->db_recovery_info.dorq_attn = true; 550 qed_dorq_attn_overflow(p_hwfn); 551 552 return qed_dorq_attn_int_sts(p_hwfn); 553 } 554 555 static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) 556 { 557 if (p_hwfn->db_recovery_info.dorq_attn) 558 goto out; 559 560 /* Call DORQ callback if the attention was missed */ 561 qed_dorq_attn_cb(p_hwfn); 562 out: 563 p_hwfn->db_recovery_info.dorq_attn = false; 564 } 565 566 /* Instead of major changes to the data-structure, we have a some 'special' 567 * identifiers for sources that changed meaning between adapters. 568 */ 569 enum aeu_invert_reg_special_type { 570 AEU_INVERT_REG_SPECIAL_CNIG_0, 571 AEU_INVERT_REG_SPECIAL_CNIG_1, 572 AEU_INVERT_REG_SPECIAL_CNIG_2, 573 AEU_INVERT_REG_SPECIAL_CNIG_3, 574 AEU_INVERT_REG_SPECIAL_MAX, 575 }; 576 577 static struct aeu_invert_reg_bit 578 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 579 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 580 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 581 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 582 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 583 }; 584 585 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 586 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 587 { 588 { /* After Invert 1 */ 589 {"GPIO0 function%d", 590 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 591 } 592 }, 593 594 { 595 { /* After Invert 2 */ 596 {"PGLUE config_space", ATTENTION_SINGLE, 597 NULL, MAX_BLOCK_ID}, 598 {"PGLUE misc_flr", ATTENTION_SINGLE, 599 NULL, MAX_BLOCK_ID}, 600 {"PGLUE B RBC", ATTENTION_PAR_INT, 601 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 602 {"PGLUE misc_mctp", ATTENTION_SINGLE, 603 NULL, MAX_BLOCK_ID}, 604 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 605 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 606 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 607 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 608 (1 << ATTENTION_OFFSET_SHIFT), 609 NULL, MAX_BLOCK_ID}, 610 {"PCIE glue/PXP VPD %d", 611 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 612 } 613 }, 614 615 { 616 { /* After Invert 3 */ 617 {"General Attention %d", 618 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 619 } 620 }, 621 622 { 623 { /* After Invert 4 */ 624 {"General Attention 32", ATTENTION_SINGLE | 625 ATTENTION_CLEAR_ENABLE, qed_fw_assertion, 626 MAX_BLOCK_ID}, 627 {"General Attention %d", 628 (2 << ATTENTION_LENGTH_SHIFT) | 629 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 630 {"General Attention 35", ATTENTION_SINGLE | 631 ATTENTION_CLEAR_ENABLE, qed_general_attention_35, 632 MAX_BLOCK_ID}, 633 {"NWS Parity", 634 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 635 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 636 NULL, BLOCK_NWS}, 637 {"NWS Interrupt", 638 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 639 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 640 NULL, BLOCK_NWS}, 641 {"NWM Parity", 642 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 643 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 644 NULL, BLOCK_NWM}, 645 {"NWM Interrupt", 646 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 647 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 648 NULL, BLOCK_NWM}, 649 {"MCP CPU", ATTENTION_SINGLE, 650 qed_mcp_attn_cb, MAX_BLOCK_ID}, 651 {"MCP Watchdog timer", ATTENTION_SINGLE, 652 NULL, MAX_BLOCK_ID}, 653 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 654 {"AVS stop status ready", ATTENTION_SINGLE, 655 NULL, MAX_BLOCK_ID}, 656 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 657 {"MSTAT per-path", ATTENTION_PAR_INT, 658 NULL, MAX_BLOCK_ID}, 659 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 660 NULL, MAX_BLOCK_ID}, 661 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 662 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 663 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 664 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 665 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 666 } 667 }, 668 669 { 670 { /* After Invert 5 */ 671 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 672 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 673 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 674 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 675 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 676 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 677 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 678 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 679 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 680 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 681 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 682 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 683 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 684 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 685 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 686 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 687 } 688 }, 689 690 { 691 { /* After Invert 6 */ 692 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 693 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 694 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 695 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 696 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 697 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 698 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 699 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, 700 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 701 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 702 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 703 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 704 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 705 {"DORQ", ATTENTION_PAR_INT, 706 qed_dorq_attn_cb, BLOCK_DORQ}, 707 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 708 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 709 } 710 }, 711 712 { 713 { /* After Invert 7 */ 714 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 715 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 716 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 717 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 718 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 719 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 720 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 721 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 722 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 723 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 724 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 725 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 726 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 727 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 728 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 729 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 730 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 731 } 732 }, 733 734 { 735 { /* After Invert 8 */ 736 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 737 NULL, BLOCK_PSWRQ2}, 738 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 739 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 740 NULL, BLOCK_PSWWR2}, 741 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 742 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 743 NULL, BLOCK_PSWRD2}, 744 {"PSWHST", ATTENTION_PAR_INT, 745 qed_pswhst_attn_cb, BLOCK_PSWHST}, 746 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 747 NULL, BLOCK_PSWHST2}, 748 {"GRC", ATTENTION_PAR_INT, 749 qed_grc_attn_cb, BLOCK_GRC}, 750 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 751 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 752 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 753 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 754 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 755 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 756 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 757 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 758 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 759 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, 760 NULL, BLOCK_PGLCS}, 761 {"PERST_B assertion", ATTENTION_SINGLE, 762 NULL, MAX_BLOCK_ID}, 763 {"PERST_B deassertion", ATTENTION_SINGLE, 764 NULL, MAX_BLOCK_ID}, 765 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 766 NULL, MAX_BLOCK_ID}, 767 } 768 }, 769 770 { 771 { /* After Invert 9 */ 772 {"MCP Latched memory", ATTENTION_PAR, 773 NULL, MAX_BLOCK_ID}, 774 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 775 NULL, MAX_BLOCK_ID}, 776 {"MCP Latched ump_tx", ATTENTION_PAR, 777 NULL, MAX_BLOCK_ID}, 778 {"MCP Latched scratchpad", ATTENTION_PAR, 779 NULL, MAX_BLOCK_ID}, 780 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 781 NULL, MAX_BLOCK_ID}, 782 } 783 }, 784 }; 785 786 static struct aeu_invert_reg_bit * 787 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 788 struct aeu_invert_reg_bit *p_bit) 789 { 790 if (!QED_IS_BB(p_hwfn->cdev)) 791 return p_bit; 792 793 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 794 return p_bit; 795 796 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 797 ATTENTION_BB_SHIFT]; 798 } 799 800 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 801 struct aeu_invert_reg_bit *p_bit) 802 { 803 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 804 ATTENTION_PARITY); 805 } 806 807 #define ATTN_STATE_BITS (0xfff) 808 #define ATTN_BITS_MASKABLE (0x3ff) 809 struct qed_sb_attn_info { 810 /* Virtual & Physical address of the SB */ 811 struct atten_status_block *sb_attn; 812 dma_addr_t sb_phys; 813 814 /* Last seen running index */ 815 u16 index; 816 817 /* A mask of the AEU bits resulting in a parity error */ 818 u32 parity_mask[NUM_ATTN_REGS]; 819 820 /* A pointer to the attention description structure */ 821 struct aeu_invert_reg *p_aeu_desc; 822 823 /* Previously asserted attentions, which are still unasserted */ 824 u16 known_attn; 825 826 /* Cleanup address for the link's general hw attention */ 827 u32 mfw_attn_addr; 828 }; 829 830 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 831 struct qed_sb_attn_info *p_sb_desc) 832 { 833 u16 rc = 0, index; 834 835 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 836 if (p_sb_desc->index != index) { 837 p_sb_desc->index = index; 838 rc = QED_SB_ATT_IDX; 839 } 840 841 return rc; 842 } 843 844 /** 845 * @brief qed_int_assertion - handles asserted attention bits 846 * 847 * @param p_hwfn 848 * @param asserted_bits newly asserted bits 849 * @return int 850 */ 851 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) 852 { 853 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 854 u32 igu_mask; 855 856 /* Mask the source of the attention in the IGU */ 857 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 858 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 859 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 860 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 861 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 862 863 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 864 "inner known ATTN state: 0x%04x --> 0x%04x\n", 865 sb_attn_sw->known_attn, 866 sb_attn_sw->known_attn | asserted_bits); 867 sb_attn_sw->known_attn |= asserted_bits; 868 869 /* Handle MCP events */ 870 if (asserted_bits & 0x100) { 871 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 872 /* Clean the MCP attention */ 873 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 874 sb_attn_sw->mfw_attn_addr, 0); 875 } 876 877 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 878 GTT_BAR0_MAP_REG_IGU_CMD + 879 ((IGU_CMD_ATTN_BIT_SET_UPPER - 880 IGU_CMD_INT_ACK_BASE) << 3), 881 (u32)asserted_bits); 882 883 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 884 asserted_bits); 885 886 return 0; 887 } 888 889 static void qed_int_attn_print(struct qed_hwfn *p_hwfn, 890 enum block_id id, 891 enum dbg_attn_type type, bool b_clear) 892 { 893 struct dbg_attn_block_result attn_results; 894 enum dbg_status status; 895 896 memset(&attn_results, 0, sizeof(attn_results)); 897 898 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 899 b_clear, &attn_results); 900 if (status != DBG_STATUS_OK) 901 DP_NOTICE(p_hwfn, 902 "Failed to parse attention information [status: %s]\n", 903 qed_dbg_get_status_str(status)); 904 else 905 qed_dbg_parse_attn(p_hwfn, &attn_results); 906 } 907 908 /** 909 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single 910 * cause of the attention 911 * 912 * @param p_hwfn 913 * @param p_aeu - descriptor of an AEU bit which caused the attention 914 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 915 * this bit to this group. 916 * @param bit_index - index of this bit in the aeu_en_reg 917 * 918 * @return int 919 */ 920 static int 921 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, 922 struct aeu_invert_reg_bit *p_aeu, 923 u32 aeu_en_reg, 924 const char *p_bit_name, u32 bitmask) 925 { 926 bool b_fatal = false; 927 int rc = -EINVAL; 928 u32 val; 929 930 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 931 p_bit_name, bitmask); 932 933 /* Call callback before clearing the interrupt status */ 934 if (p_aeu->cb) { 935 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 936 p_bit_name); 937 rc = p_aeu->cb(p_hwfn); 938 } 939 940 if (rc) 941 b_fatal = true; 942 943 /* Print HW block interrupt registers */ 944 if (p_aeu->block_index != MAX_BLOCK_ID) 945 qed_int_attn_print(p_hwfn, p_aeu->block_index, 946 ATTN_TYPE_INTERRUPT, !b_fatal); 947 948 /* Reach assertion if attention is fatal */ 949 if (b_fatal) 950 qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN, 951 "`%s': Fatal attention\n", 952 p_bit_name); 953 else /* If the attention is benign, no need to prevent it */ 954 goto out; 955 956 /* Prevent this Attention from being asserted in the future */ 957 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 958 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 959 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 960 p_bit_name); 961 962 out: 963 return rc; 964 } 965 966 /** 967 * @brief qed_int_deassertion_parity - handle a single parity AEU source 968 * 969 * @param p_hwfn 970 * @param p_aeu - descriptor of an AEU bit which caused the parity 971 * @param aeu_en_reg - address of the AEU enable register 972 * @param bit_index 973 */ 974 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 975 struct aeu_invert_reg_bit *p_aeu, 976 u32 aeu_en_reg, u8 bit_index) 977 { 978 u32 block_id = p_aeu->block_index, mask, val; 979 980 DP_NOTICE(p_hwfn->cdev, 981 "%s parity attention is set [address 0x%08x, bit %d]\n", 982 p_aeu->bit_name, aeu_en_reg, bit_index); 983 984 if (block_id != MAX_BLOCK_ID) { 985 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 986 987 /* In BB, there's a single parity bit for several blocks */ 988 if (block_id == BLOCK_BTB) { 989 qed_int_attn_print(p_hwfn, BLOCK_OPTE, 990 ATTN_TYPE_PARITY, false); 991 qed_int_attn_print(p_hwfn, BLOCK_MCP, 992 ATTN_TYPE_PARITY, false); 993 } 994 } 995 996 /* Prevent this parity error from being re-asserted */ 997 mask = ~BIT(bit_index); 998 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 999 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 1000 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 1001 p_aeu->bit_name); 1002 } 1003 1004 /** 1005 * @brief - handles deassertion of previously asserted attentions. 1006 * 1007 * @param p_hwfn 1008 * @param deasserted_bits - newly deasserted bits 1009 * @return int 1010 * 1011 */ 1012 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 1013 u16 deasserted_bits) 1014 { 1015 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1016 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1017 u8 i, j, k, bit_idx; 1018 int rc = 0; 1019 1020 /* Read the attention registers in the AEU */ 1021 for (i = 0; i < NUM_ATTN_REGS; i++) { 1022 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1023 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1024 i * 0x4); 1025 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1026 "Deasserted bits [%d]: %08x\n", 1027 i, aeu_inv_arr[i]); 1028 } 1029 1030 /* Find parity attentions first */ 1031 for (i = 0; i < NUM_ATTN_REGS; i++) { 1032 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1033 u32 parities; 1034 1035 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1036 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1037 1038 /* Skip register in which no parity bit is currently set */ 1039 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1040 if (!parities) 1041 continue; 1042 1043 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1044 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1045 1046 if (qed_int_is_parity_flag(p_hwfn, p_bit) && 1047 !!(parities & BIT(bit_idx))) 1048 qed_int_deassertion_parity(p_hwfn, p_bit, 1049 aeu_en, bit_idx); 1050 1051 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1052 } 1053 } 1054 1055 /* Find non-parity cause for attention and act */ 1056 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1057 struct aeu_invert_reg_bit *p_aeu; 1058 1059 /* Handle only groups whose attention is currently deasserted */ 1060 if (!(deasserted_bits & (1 << k))) 1061 continue; 1062 1063 for (i = 0; i < NUM_ATTN_REGS; i++) { 1064 u32 bits; 1065 1066 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1067 i * sizeof(u32) + 1068 k * sizeof(u32) * NUM_ATTN_REGS; 1069 1070 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1071 bits = aeu_inv_arr[i] & en; 1072 1073 /* Skip if no bit from this group is currently set */ 1074 if (!bits) 1075 continue; 1076 1077 /* Find all set bits from current register which belong 1078 * to current group, making them responsible for the 1079 * previous assertion. 1080 */ 1081 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1082 long unsigned int bitmask; 1083 u8 bit, bit_len; 1084 1085 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1086 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); 1087 1088 bit = bit_idx; 1089 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1090 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { 1091 /* Skip Parity */ 1092 bit++; 1093 bit_len--; 1094 } 1095 1096 bitmask = bits & (((1 << bit_len) - 1) << bit); 1097 bitmask >>= bit; 1098 1099 if (bitmask) { 1100 u32 flags = p_aeu->flags; 1101 char bit_name[30]; 1102 u8 num; 1103 1104 num = (u8)find_first_bit(&bitmask, 1105 bit_len); 1106 1107 /* Some bits represent more than a 1108 * a single interrupt. Correctly print 1109 * their name. 1110 */ 1111 if (ATTENTION_LENGTH(flags) > 2 || 1112 ((flags & ATTENTION_PAR_INT) && 1113 ATTENTION_LENGTH(flags) > 1)) 1114 snprintf(bit_name, 30, 1115 p_aeu->bit_name, num); 1116 else 1117 strlcpy(bit_name, 1118 p_aeu->bit_name, 30); 1119 1120 /* We now need to pass bitmask in its 1121 * correct position. 1122 */ 1123 bitmask <<= bit; 1124 1125 /* Handle source of the attention */ 1126 qed_int_deassertion_aeu_bit(p_hwfn, 1127 p_aeu, 1128 aeu_en, 1129 bit_name, 1130 bitmask); 1131 } 1132 1133 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1134 } 1135 } 1136 } 1137 1138 /* Handle missed DORQ attention */ 1139 qed_dorq_attn_handler(p_hwfn); 1140 1141 /* Clear IGU indication for the deasserted bits */ 1142 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1143 GTT_BAR0_MAP_REG_IGU_CMD + 1144 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1145 IGU_CMD_INT_ACK_BASE) << 3), 1146 ~((u32)deasserted_bits)); 1147 1148 /* Unmask deasserted attentions in IGU */ 1149 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 1150 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1151 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1152 1153 /* Clear deassertion from inner state */ 1154 sb_attn_sw->known_attn &= ~deasserted_bits; 1155 1156 return rc; 1157 } 1158 1159 static int qed_int_attentions(struct qed_hwfn *p_hwfn) 1160 { 1161 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1162 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1163 u32 attn_bits = 0, attn_acks = 0; 1164 u16 asserted_bits, deasserted_bits; 1165 __le16 index; 1166 int rc = 0; 1167 1168 /* Read current attention bits/acks - safeguard against attentions 1169 * by guaranting work on a synchronized timeframe 1170 */ 1171 do { 1172 index = p_sb_attn->sb_index; 1173 /* finish reading index before the loop condition */ 1174 dma_rmb(); 1175 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 1176 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 1177 } while (index != p_sb_attn->sb_index); 1178 p_sb_attn->sb_index = index; 1179 1180 /* Attention / Deassertion are meaningful (and in correct state) 1181 * only when they differ and consistent with known state - deassertion 1182 * when previous attention & current ack, and assertion when current 1183 * attention with no previous attention 1184 */ 1185 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1186 ~p_sb_attn_sw->known_attn; 1187 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1188 p_sb_attn_sw->known_attn; 1189 1190 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { 1191 DP_INFO(p_hwfn, 1192 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1193 index, attn_bits, attn_acks, asserted_bits, 1194 deasserted_bits, p_sb_attn_sw->known_attn); 1195 } else if (asserted_bits == 0x100) { 1196 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1197 } else { 1198 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1199 "MFW indication [deassertion]\n"); 1200 } 1201 1202 if (asserted_bits) { 1203 rc = qed_int_assertion(p_hwfn, asserted_bits); 1204 if (rc) 1205 return rc; 1206 } 1207 1208 if (deasserted_bits) 1209 rc = qed_int_deassertion(p_hwfn, deasserted_bits); 1210 1211 return rc; 1212 } 1213 1214 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, 1215 void __iomem *igu_addr, u32 ack_cons) 1216 { 1217 struct igu_prod_cons_update igu_ack = { 0 }; 1218 1219 igu_ack.sb_id_and_flags = 1220 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1221 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1222 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1223 (IGU_SEG_ACCESS_ATTN << 1224 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1225 1226 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); 1227 1228 /* Both segments (interrupts & acks) are written to same place address; 1229 * Need to guarantee all commands will be received (in-order) by HW. 1230 */ 1231 barrier(); 1232 } 1233 1234 void qed_int_sp_dpc(unsigned long hwfn_cookie) 1235 { 1236 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; 1237 struct qed_pi_info *pi_info = NULL; 1238 struct qed_sb_attn_info *sb_attn; 1239 struct qed_sb_info *sb_info; 1240 int arr_size; 1241 u16 rc = 0; 1242 1243 if (!p_hwfn->p_sp_sb) { 1244 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 1245 return; 1246 } 1247 1248 sb_info = &p_hwfn->p_sp_sb->sb_info; 1249 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1250 if (!sb_info) { 1251 DP_ERR(p_hwfn->cdev, 1252 "Status block is NULL - cannot ack interrupts\n"); 1253 return; 1254 } 1255 1256 if (!p_hwfn->p_sb_attn) { 1257 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); 1258 return; 1259 } 1260 sb_attn = p_hwfn->p_sb_attn; 1261 1262 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1263 p_hwfn, p_hwfn->my_id); 1264 1265 /* Disable ack for def status block. Required both for msix + 1266 * inta in non-mask mode, in inta does no harm. 1267 */ 1268 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1269 1270 /* Gather Interrupts/Attentions information */ 1271 if (!sb_info->sb_virt) { 1272 DP_ERR(p_hwfn->cdev, 1273 "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1274 } else { 1275 u32 tmp_index = sb_info->sb_ack; 1276 1277 rc = qed_sb_update_sb_idx(sb_info); 1278 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1279 "Interrupt indices: 0x%08x --> 0x%08x\n", 1280 tmp_index, sb_info->sb_ack); 1281 } 1282 1283 if (!sb_attn || !sb_attn->sb_attn) { 1284 DP_ERR(p_hwfn->cdev, 1285 "Attentions Status block is NULL - cannot check for new attentions!\n"); 1286 } else { 1287 u16 tmp_index = sb_attn->index; 1288 1289 rc |= qed_attn_update_idx(p_hwfn, sb_attn); 1290 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1291 "Attention indices: 0x%08x --> 0x%08x\n", 1292 tmp_index, sb_attn->index); 1293 } 1294 1295 /* Check if we expect interrupts at this time. if not just ack them */ 1296 if (!(rc & QED_SB_EVENT_MASK)) { 1297 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1298 return; 1299 } 1300 1301 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1302 if (!p_hwfn->p_dpc_ptt) { 1303 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); 1304 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1305 return; 1306 } 1307 1308 if (rc & QED_SB_ATT_IDX) 1309 qed_int_attentions(p_hwfn); 1310 1311 if (rc & QED_SB_IDX) { 1312 int pi; 1313 1314 /* Look for a free index */ 1315 for (pi = 0; pi < arr_size; pi++) { 1316 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1317 if (pi_info->comp_cb) 1318 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1319 } 1320 } 1321 1322 if (sb_attn && (rc & QED_SB_ATT_IDX)) 1323 /* This should be done before the interrupts are enabled, 1324 * since otherwise a new attention will be generated. 1325 */ 1326 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1327 1328 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1329 } 1330 1331 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) 1332 { 1333 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1334 1335 if (!p_sb) 1336 return; 1337 1338 if (p_sb->sb_attn) 1339 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1340 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1341 p_sb->sb_attn, p_sb->sb_phys); 1342 kfree(p_sb); 1343 p_hwfn->p_sb_attn = NULL; 1344 } 1345 1346 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, 1347 struct qed_ptt *p_ptt) 1348 { 1349 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1350 1351 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1352 1353 sb_info->index = 0; 1354 sb_info->known_attn = 0; 1355 1356 /* Configure Attention Status Block in IGU */ 1357 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1358 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1359 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1360 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1361 } 1362 1363 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, 1364 struct qed_ptt *p_ptt, 1365 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1366 { 1367 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1368 int i, j, k; 1369 1370 sb_info->sb_attn = sb_virt_addr; 1371 sb_info->sb_phys = sb_phy_addr; 1372 1373 /* Set the pointer to the AEU descriptors */ 1374 sb_info->p_aeu_desc = aeu_descs; 1375 1376 /* Calculate Parity Masks */ 1377 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1378 for (i = 0; i < NUM_ATTN_REGS; i++) { 1379 /* j is array index, k is bit index */ 1380 for (j = 0, k = 0; k < 32; j++) { 1381 struct aeu_invert_reg_bit *p_aeu; 1382 1383 p_aeu = &aeu_descs[i].bits[j]; 1384 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) 1385 sb_info->parity_mask[i] |= 1 << k; 1386 1387 k += ATTENTION_LENGTH(p_aeu->flags); 1388 } 1389 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1390 "Attn Mask [Reg %d]: 0x%08x\n", 1391 i, sb_info->parity_mask[i]); 1392 } 1393 1394 /* Set the address of cleanup for the mcp attention */ 1395 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1396 MISC_REG_AEU_GENERAL_ATTN_0; 1397 1398 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1399 } 1400 1401 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, 1402 struct qed_ptt *p_ptt) 1403 { 1404 struct qed_dev *cdev = p_hwfn->cdev; 1405 struct qed_sb_attn_info *p_sb; 1406 dma_addr_t p_phys = 0; 1407 void *p_virt; 1408 1409 /* SB struct */ 1410 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1411 if (!p_sb) 1412 return -ENOMEM; 1413 1414 /* SB ring */ 1415 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 1416 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1417 &p_phys, GFP_KERNEL); 1418 1419 if (!p_virt) { 1420 kfree(p_sb); 1421 return -ENOMEM; 1422 } 1423 1424 /* Attention setup */ 1425 p_hwfn->p_sb_attn = p_sb; 1426 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1427 1428 return 0; 1429 } 1430 1431 /* coalescing timeout = timeset << (timer_res + 1) */ 1432 #define QED_CAU_DEF_RX_USECS 24 1433 #define QED_CAU_DEF_TX_USECS 48 1434 1435 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, 1436 struct cau_sb_entry *p_sb_entry, 1437 u8 pf_id, u16 vf_number, u8 vf_valid) 1438 { 1439 struct qed_dev *cdev = p_hwfn->cdev; 1440 u32 cau_state; 1441 u8 timer_res; 1442 1443 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 1444 1445 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1446 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1447 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1448 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1449 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1450 1451 cau_state = CAU_HC_DISABLE_STATE; 1452 1453 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1454 cau_state = CAU_HC_ENABLE_STATE; 1455 if (!cdev->rx_coalesce_usecs) 1456 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; 1457 if (!cdev->tx_coalesce_usecs) 1458 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 1459 } 1460 1461 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1462 if (cdev->rx_coalesce_usecs <= 0x7F) 1463 timer_res = 0; 1464 else if (cdev->rx_coalesce_usecs <= 0xFF) 1465 timer_res = 1; 1466 else 1467 timer_res = 2; 1468 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1469 1470 if (cdev->tx_coalesce_usecs <= 0x7F) 1471 timer_res = 0; 1472 else if (cdev->tx_coalesce_usecs <= 0xFF) 1473 timer_res = 1; 1474 else 1475 timer_res = 2; 1476 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1477 1478 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1479 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1480 } 1481 1482 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, 1483 struct qed_ptt *p_ptt, 1484 u16 igu_sb_id, 1485 u32 pi_index, 1486 enum qed_coalescing_fsm coalescing_fsm, 1487 u8 timeset) 1488 { 1489 struct cau_pi_entry pi_entry; 1490 u32 sb_offset, pi_offset; 1491 1492 if (IS_VF(p_hwfn->cdev)) 1493 return; 1494 1495 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1496 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1497 1498 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1499 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 1500 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1501 else 1502 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1503 1504 pi_offset = sb_offset + pi_index; 1505 if (p_hwfn->hw_init_done) { 1506 qed_wr(p_hwfn, p_ptt, 1507 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1508 *((u32 *)&(pi_entry))); 1509 } else { 1510 STORE_RT_REG(p_hwfn, 1511 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1512 *((u32 *)&(pi_entry))); 1513 } 1514 } 1515 1516 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, 1517 struct qed_ptt *p_ptt, 1518 dma_addr_t sb_phys, 1519 u16 igu_sb_id, u16 vf_number, u8 vf_valid) 1520 { 1521 struct cau_sb_entry sb_entry; 1522 1523 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1524 vf_number, vf_valid); 1525 1526 if (p_hwfn->hw_init_done) { 1527 /* Wide-bus, initialize via DMAE */ 1528 u64 phys_addr = (u64)sb_phys; 1529 1530 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, 1531 CAU_REG_SB_ADDR_MEMORY + 1532 igu_sb_id * sizeof(u64), 2, NULL); 1533 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, 1534 CAU_REG_SB_VAR_MEMORY + 1535 igu_sb_id * sizeof(u64), 2, NULL); 1536 } else { 1537 /* Initialize Status Block Address */ 1538 STORE_RT_REG_AGG(p_hwfn, 1539 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1540 igu_sb_id * 2, 1541 sb_phys); 1542 1543 STORE_RT_REG_AGG(p_hwfn, 1544 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1545 igu_sb_id * 2, 1546 sb_entry); 1547 } 1548 1549 /* Configure pi coalescing if set */ 1550 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1551 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1552 u8 timeset, timer_res; 1553 u8 i; 1554 1555 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1556 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) 1557 timer_res = 0; 1558 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) 1559 timer_res = 1; 1560 else 1561 timer_res = 2; 1562 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); 1563 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1564 QED_COAL_RX_STATE_MACHINE, timeset); 1565 1566 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) 1567 timer_res = 0; 1568 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) 1569 timer_res = 1; 1570 else 1571 timer_res = 2; 1572 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); 1573 for (i = 0; i < num_tc; i++) { 1574 qed_int_cau_conf_pi(p_hwfn, p_ptt, 1575 igu_sb_id, TX_PI(i), 1576 QED_COAL_TX_STATE_MACHINE, 1577 timeset); 1578 } 1579 } 1580 } 1581 1582 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 1583 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) 1584 { 1585 /* zero status block and ack counter */ 1586 sb_info->sb_ack = 0; 1587 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1588 1589 if (IS_PF(p_hwfn->cdev)) 1590 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1591 sb_info->igu_sb_id, 0, 0); 1592 } 1593 1594 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) 1595 { 1596 struct qed_igu_block *p_block; 1597 u16 igu_id; 1598 1599 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1600 igu_id++) { 1601 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1602 1603 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1604 !(p_block->status & QED_IGU_STATUS_FREE)) 1605 continue; 1606 1607 if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) 1608 return p_block; 1609 } 1610 1611 return NULL; 1612 } 1613 1614 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) 1615 { 1616 struct qed_igu_block *p_block; 1617 u16 igu_id; 1618 1619 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1620 igu_id++) { 1621 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1622 1623 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1624 !p_block->is_pf || 1625 p_block->vector_number != vector_id) 1626 continue; 1627 1628 return igu_id; 1629 } 1630 1631 return QED_SB_INVALID_IDX; 1632 } 1633 1634 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 1635 { 1636 u16 igu_sb_id; 1637 1638 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1639 if (sb_id == QED_SP_SB_ID) 1640 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1641 else if (IS_PF(p_hwfn->cdev)) 1642 igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1643 else 1644 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 1645 1646 if (sb_id == QED_SP_SB_ID) 1647 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1648 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1649 else 1650 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1651 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1652 1653 return igu_sb_id; 1654 } 1655 1656 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 1657 struct qed_ptt *p_ptt, 1658 struct qed_sb_info *sb_info, 1659 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) 1660 { 1661 sb_info->sb_virt = sb_virt_addr; 1662 sb_info->sb_phys = sb_phy_addr; 1663 1664 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 1665 1666 if (sb_id != QED_SP_SB_ID) { 1667 if (IS_PF(p_hwfn->cdev)) { 1668 struct qed_igu_info *p_info; 1669 struct qed_igu_block *p_block; 1670 1671 p_info = p_hwfn->hw_info.p_igu_info; 1672 p_block = &p_info->entry[sb_info->igu_sb_id]; 1673 1674 p_block->sb_info = sb_info; 1675 p_block->status &= ~QED_IGU_STATUS_FREE; 1676 p_info->usage.free_cnt--; 1677 } else { 1678 qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1679 } 1680 } 1681 1682 sb_info->cdev = p_hwfn->cdev; 1683 1684 /* The igu address will hold the absolute address that needs to be 1685 * written to for a specific status block 1686 */ 1687 if (IS_PF(p_hwfn->cdev)) { 1688 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1689 GTT_BAR0_MAP_REG_IGU_CMD + 1690 (sb_info->igu_sb_id << 3); 1691 } else { 1692 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1693 PXP_VF_BAR0_START_IGU + 1694 ((IGU_CMD_INT_ACK_BASE + 1695 sb_info->igu_sb_id) << 3); 1696 } 1697 1698 sb_info->flags |= QED_SB_INFO_INIT; 1699 1700 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 1701 1702 return 0; 1703 } 1704 1705 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 1706 struct qed_sb_info *sb_info, u16 sb_id) 1707 { 1708 struct qed_igu_block *p_block; 1709 struct qed_igu_info *p_info; 1710 1711 if (!sb_info) 1712 return 0; 1713 1714 /* zero status block and ack counter */ 1715 sb_info->sb_ack = 0; 1716 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1717 1718 if (IS_VF(p_hwfn->cdev)) { 1719 qed_vf_set_sb_info(p_hwfn, sb_id, NULL); 1720 return 0; 1721 } 1722 1723 p_info = p_hwfn->hw_info.p_igu_info; 1724 p_block = &p_info->entry[sb_info->igu_sb_id]; 1725 1726 /* Vector 0 is reserved to Default SB */ 1727 if (!p_block->vector_number) { 1728 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1729 return -EINVAL; 1730 } 1731 1732 /* Lose reference to client's SB info, and fix counters */ 1733 p_block->sb_info = NULL; 1734 p_block->status |= QED_IGU_STATUS_FREE; 1735 p_info->usage.free_cnt++; 1736 1737 return 0; 1738 } 1739 1740 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 1741 { 1742 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1743 1744 if (!p_sb) 1745 return; 1746 1747 if (p_sb->sb_info.sb_virt) 1748 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1749 SB_ALIGNED_SIZE(p_hwfn), 1750 p_sb->sb_info.sb_virt, 1751 p_sb->sb_info.sb_phys); 1752 kfree(p_sb); 1753 p_hwfn->p_sp_sb = NULL; 1754 } 1755 1756 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1757 { 1758 struct qed_sb_sp_info *p_sb; 1759 dma_addr_t p_phys = 0; 1760 void *p_virt; 1761 1762 /* SB struct */ 1763 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1764 if (!p_sb) 1765 return -ENOMEM; 1766 1767 /* SB ring */ 1768 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1769 SB_ALIGNED_SIZE(p_hwfn), 1770 &p_phys, GFP_KERNEL); 1771 if (!p_virt) { 1772 kfree(p_sb); 1773 return -ENOMEM; 1774 } 1775 1776 /* Status Block setup */ 1777 p_hwfn->p_sp_sb = p_sb; 1778 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 1779 p_phys, QED_SP_SB_ID); 1780 1781 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1782 1783 return 0; 1784 } 1785 1786 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 1787 qed_int_comp_cb_t comp_cb, 1788 void *cookie, u8 *sb_idx, __le16 **p_fw_cons) 1789 { 1790 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1791 int rc = -ENOMEM; 1792 u8 pi; 1793 1794 /* Look for a free index */ 1795 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1796 if (p_sp_sb->pi_info_arr[pi].comp_cb) 1797 continue; 1798 1799 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1800 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1801 *sb_idx = pi; 1802 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1803 rc = 0; 1804 break; 1805 } 1806 1807 return rc; 1808 } 1809 1810 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 1811 { 1812 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1813 1814 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 1815 return -ENOMEM; 1816 1817 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 1818 p_sp_sb->pi_info_arr[pi].cookie = NULL; 1819 1820 return 0; 1821 } 1822 1823 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 1824 { 1825 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1826 } 1827 1828 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 1829 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1830 { 1831 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1832 1833 p_hwfn->cdev->int_mode = int_mode; 1834 switch (p_hwfn->cdev->int_mode) { 1835 case QED_INT_MODE_INTA: 1836 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1837 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1838 break; 1839 1840 case QED_INT_MODE_MSI: 1841 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1842 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1843 break; 1844 1845 case QED_INT_MODE_MSIX: 1846 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1847 break; 1848 case QED_INT_MODE_POLL: 1849 break; 1850 } 1851 1852 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1853 } 1854 1855 static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, 1856 struct qed_ptt *p_ptt) 1857 { 1858 1859 /* Configure AEU signal change to produce attentions */ 1860 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1861 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1862 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1863 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1864 1865 /* Unmask AEU signals toward IGU */ 1866 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1867 } 1868 1869 int 1870 qed_int_igu_enable(struct qed_hwfn *p_hwfn, 1871 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1872 { 1873 int rc = 0; 1874 1875 qed_int_igu_enable_attn(p_hwfn, p_ptt); 1876 1877 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1878 rc = qed_slowpath_irq_req(p_hwfn); 1879 if (rc) { 1880 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 1881 return -EINVAL; 1882 } 1883 p_hwfn->b_int_requested = true; 1884 } 1885 /* Enable interrupt Generation */ 1886 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1887 p_hwfn->b_int_enabled = 1; 1888 1889 return rc; 1890 } 1891 1892 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1893 { 1894 p_hwfn->b_int_enabled = 0; 1895 1896 if (IS_VF(p_hwfn->cdev)) 1897 return; 1898 1899 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1900 } 1901 1902 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1903 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 1904 struct qed_ptt *p_ptt, 1905 u16 igu_sb_id, 1906 bool cleanup_set, u16 opaque_fid) 1907 { 1908 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1909 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1910 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1911 1912 /* Set the data field */ 1913 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1914 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 1915 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1916 1917 /* Set the control register */ 1918 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1919 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1920 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1921 1922 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1923 1924 barrier(); 1925 1926 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1927 1928 /* calculate where to read the status bit from */ 1929 sb_bit = 1 << (igu_sb_id % 32); 1930 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 1931 1932 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 1933 1934 /* Now wait for the command to complete */ 1935 do { 1936 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 1937 1938 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1939 break; 1940 1941 usleep_range(5000, 10000); 1942 } while (--sleep_cnt); 1943 1944 if (!sleep_cnt) 1945 DP_NOTICE(p_hwfn, 1946 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1947 val, igu_sb_id); 1948 } 1949 1950 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 1951 struct qed_ptt *p_ptt, 1952 u16 igu_sb_id, u16 opaque, bool b_set) 1953 { 1954 struct qed_igu_block *p_block; 1955 int pi, i; 1956 1957 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 1958 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1959 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 1960 igu_sb_id, 1961 p_block->function_id, 1962 p_block->is_pf, p_block->vector_number); 1963 1964 /* Set */ 1965 if (b_set) 1966 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 1967 1968 /* Clear */ 1969 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 1970 1971 /* Wait for the IGU SB to cleanup */ 1972 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1973 u32 val; 1974 1975 val = qed_rd(p_hwfn, p_ptt, 1976 IGU_REG_WRITE_DONE_PENDING + 1977 ((igu_sb_id / 32) * 4)); 1978 if (val & BIT((igu_sb_id % 32))) 1979 usleep_range(10, 20); 1980 else 1981 break; 1982 } 1983 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1984 DP_NOTICE(p_hwfn, 1985 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1986 igu_sb_id); 1987 1988 /* Clear the CAU for the SB */ 1989 for (pi = 0; pi < 12; pi++) 1990 qed_wr(p_hwfn, p_ptt, 1991 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 1992 } 1993 1994 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 1995 struct qed_ptt *p_ptt, 1996 bool b_set, bool b_slowpath) 1997 { 1998 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 1999 struct qed_igu_block *p_block; 2000 u16 igu_sb_id = 0; 2001 u32 val = 0; 2002 2003 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2004 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2005 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2006 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2007 2008 for (igu_sb_id = 0; 2009 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2010 p_block = &p_info->entry[igu_sb_id]; 2011 2012 if (!(p_block->status & QED_IGU_STATUS_VALID) || 2013 !p_block->is_pf || 2014 (p_block->status & QED_IGU_STATUS_DSB)) 2015 continue; 2016 2017 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2018 p_hwfn->hw_info.opaque_fid, 2019 b_set); 2020 } 2021 2022 if (b_slowpath) 2023 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2024 p_info->igu_dsb_id, 2025 p_hwfn->hw_info.opaque_fid, 2026 b_set); 2027 } 2028 2029 int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2030 { 2031 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2032 struct qed_igu_block *p_block; 2033 int pf_sbs, vf_sbs; 2034 u16 igu_sb_id; 2035 u32 val, rval; 2036 2037 if (!RESC_NUM(p_hwfn, QED_SB)) { 2038 p_info->b_allow_pf_vf_change = false; 2039 } else { 2040 /* Use the numbers the MFW have provided - 2041 * don't forget MFW accounts for the default SB as well. 2042 */ 2043 p_info->b_allow_pf_vf_change = true; 2044 2045 if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { 2046 DP_INFO(p_hwfn, 2047 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2048 RESC_NUM(p_hwfn, QED_SB) - 1, 2049 p_info->usage.cnt); 2050 p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; 2051 } 2052 2053 if (IS_PF_SRIOV(p_hwfn)) { 2054 u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; 2055 2056 if (vfs != p_info->usage.iov_cnt) 2057 DP_VERBOSE(p_hwfn, 2058 NETIF_MSG_INTR, 2059 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2060 p_info->usage.iov_cnt, vfs); 2061 2062 /* At this point we know how many SBs we have totally 2063 * in IGU + number of PF SBs. So we can validate that 2064 * we'd have sufficient for VF. 2065 */ 2066 if (vfs > p_info->usage.free_cnt + 2067 p_info->usage.free_cnt_iov - p_info->usage.cnt) { 2068 DP_NOTICE(p_hwfn, 2069 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2070 p_info->usage.free_cnt + 2071 p_info->usage.free_cnt_iov, 2072 p_info->usage.cnt, vfs); 2073 return -EINVAL; 2074 } 2075 2076 /* Currently cap the number of VFs SBs by the 2077 * number of VFs. 2078 */ 2079 p_info->usage.iov_cnt = vfs; 2080 } 2081 } 2082 2083 /* Mark all SBs as free, now in the right PF/VFs division */ 2084 p_info->usage.free_cnt = p_info->usage.cnt; 2085 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2086 p_info->usage.orig = p_info->usage.cnt; 2087 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2088 2089 /* We now proceed to re-configure the IGU cam to reflect the initial 2090 * configuration. We can start with the Default SB. 2091 */ 2092 pf_sbs = p_info->usage.cnt; 2093 vf_sbs = p_info->usage.iov_cnt; 2094 2095 for (igu_sb_id = p_info->igu_dsb_id; 2096 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2097 p_block = &p_info->entry[igu_sb_id]; 2098 val = 0; 2099 2100 if (!(p_block->status & QED_IGU_STATUS_VALID)) 2101 continue; 2102 2103 if (p_block->status & QED_IGU_STATUS_DSB) { 2104 p_block->function_id = p_hwfn->rel_pf_id; 2105 p_block->is_pf = 1; 2106 p_block->vector_number = 0; 2107 p_block->status = QED_IGU_STATUS_VALID | 2108 QED_IGU_STATUS_PF | 2109 QED_IGU_STATUS_DSB; 2110 } else if (pf_sbs) { 2111 pf_sbs--; 2112 p_block->function_id = p_hwfn->rel_pf_id; 2113 p_block->is_pf = 1; 2114 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2115 p_block->status = QED_IGU_STATUS_VALID | 2116 QED_IGU_STATUS_PF | 2117 QED_IGU_STATUS_FREE; 2118 } else if (vf_sbs) { 2119 p_block->function_id = 2120 p_hwfn->cdev->p_iov_info->first_vf_in_pf + 2121 p_info->usage.iov_cnt - vf_sbs; 2122 p_block->is_pf = 0; 2123 p_block->vector_number = 0; 2124 p_block->status = QED_IGU_STATUS_VALID | 2125 QED_IGU_STATUS_FREE; 2126 vf_sbs--; 2127 } else { 2128 p_block->function_id = 0; 2129 p_block->is_pf = 0; 2130 p_block->vector_number = 0; 2131 } 2132 2133 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2134 p_block->function_id); 2135 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2136 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2137 p_block->vector_number); 2138 2139 /* VF entries would be enabled when VF is initializaed */ 2140 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2141 2142 rval = qed_rd(p_hwfn, p_ptt, 2143 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2144 2145 if (rval != val) { 2146 qed_wr(p_hwfn, p_ptt, 2147 IGU_REG_MAPPING_MEMORY + 2148 sizeof(u32) * igu_sb_id, val); 2149 2150 DP_VERBOSE(p_hwfn, 2151 NETIF_MSG_INTR, 2152 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2153 igu_sb_id, 2154 p_block->function_id, 2155 p_block->is_pf, 2156 p_block->vector_number, rval, val); 2157 } 2158 } 2159 2160 return 0; 2161 } 2162 2163 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 2164 struct qed_ptt *p_ptt, u16 igu_sb_id) 2165 { 2166 u32 val = qed_rd(p_hwfn, p_ptt, 2167 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2168 struct qed_igu_block *p_block; 2169 2170 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2171 2172 /* Fill the block information */ 2173 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2174 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2175 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2176 p_block->igu_sb_id = igu_sb_id; 2177 } 2178 2179 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2180 { 2181 struct qed_igu_info *p_igu_info; 2182 struct qed_igu_block *p_block; 2183 u32 min_vf = 0, max_vf = 0; 2184 u16 igu_sb_id; 2185 2186 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 2187 if (!p_hwfn->hw_info.p_igu_info) 2188 return -ENOMEM; 2189 2190 p_igu_info = p_hwfn->hw_info.p_igu_info; 2191 2192 /* Distinguish between existent and non-existent default SB */ 2193 p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; 2194 2195 /* Find the range of VF ids whose SB belong to this PF */ 2196 if (p_hwfn->cdev->p_iov_info) { 2197 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 2198 2199 min_vf = p_iov->first_vf_in_pf; 2200 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2201 } 2202 2203 for (igu_sb_id = 0; 2204 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2205 /* Read current entry; Notice it might not belong to this PF */ 2206 qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2207 p_block = &p_igu_info->entry[igu_sb_id]; 2208 2209 if ((p_block->is_pf) && 2210 (p_block->function_id == p_hwfn->rel_pf_id)) { 2211 p_block->status = QED_IGU_STATUS_PF | 2212 QED_IGU_STATUS_VALID | 2213 QED_IGU_STATUS_FREE; 2214 2215 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2216 p_igu_info->usage.cnt++; 2217 } else if (!(p_block->is_pf) && 2218 (p_block->function_id >= min_vf) && 2219 (p_block->function_id < max_vf)) { 2220 /* Available for VFs of this PF */ 2221 p_block->status = QED_IGU_STATUS_VALID | 2222 QED_IGU_STATUS_FREE; 2223 2224 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2225 p_igu_info->usage.iov_cnt++; 2226 } 2227 2228 /* Mark the First entry belonging to the PF or its VFs 2229 * as the default SB [we'll reset IGU prior to first usage]. 2230 */ 2231 if ((p_block->status & QED_IGU_STATUS_VALID) && 2232 (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { 2233 p_igu_info->igu_dsb_id = igu_sb_id; 2234 p_block->status |= QED_IGU_STATUS_DSB; 2235 } 2236 2237 /* limit number of prints by having each PF print only its 2238 * entries with the exception of PF0 which would print 2239 * everything. 2240 */ 2241 if ((p_block->status & QED_IGU_STATUS_VALID) || 2242 (p_hwfn->abs_pf_id == 0)) { 2243 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2244 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2245 igu_sb_id, p_block->function_id, 2246 p_block->is_pf, p_block->vector_number); 2247 } 2248 } 2249 2250 if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { 2251 DP_NOTICE(p_hwfn, 2252 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2253 p_igu_info->igu_dsb_id); 2254 return -EINVAL; 2255 } 2256 2257 /* All non default SB are considered free at this point */ 2258 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2259 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2260 2261 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2262 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2263 p_igu_info->igu_dsb_id, 2264 p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); 2265 2266 return 0; 2267 } 2268 2269 /** 2270 * @brief Initialize igu runtime registers 2271 * 2272 * @param p_hwfn 2273 */ 2274 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 2275 { 2276 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2277 2278 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2279 } 2280 2281 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 2282 { 2283 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 2284 IGU_CMD_INT_ACK_BASE; 2285 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 2286 IGU_CMD_INT_ACK_BASE; 2287 u32 intr_status_hi = 0, intr_status_lo = 0; 2288 u64 intr_status = 0; 2289 2290 intr_status_lo = REG_RD(p_hwfn, 2291 GTT_BAR0_MAP_REG_IGU_CMD + 2292 lsb_igu_cmd_addr * 8); 2293 intr_status_hi = REG_RD(p_hwfn, 2294 GTT_BAR0_MAP_REG_IGU_CMD + 2295 msb_igu_cmd_addr * 8); 2296 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2297 2298 return intr_status; 2299 } 2300 2301 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 2302 { 2303 tasklet_init(p_hwfn->sp_dpc, 2304 qed_int_sp_dpc, (unsigned long)p_hwfn); 2305 p_hwfn->b_sp_dpc_enabled = true; 2306 } 2307 2308 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 2309 { 2310 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 2311 if (!p_hwfn->sp_dpc) 2312 return -ENOMEM; 2313 2314 return 0; 2315 } 2316 2317 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 2318 { 2319 kfree(p_hwfn->sp_dpc); 2320 p_hwfn->sp_dpc = NULL; 2321 } 2322 2323 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2324 { 2325 int rc = 0; 2326 2327 rc = qed_int_sp_dpc_alloc(p_hwfn); 2328 if (rc) 2329 return rc; 2330 2331 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 2332 if (rc) 2333 return rc; 2334 2335 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 2336 2337 return rc; 2338 } 2339 2340 void qed_int_free(struct qed_hwfn *p_hwfn) 2341 { 2342 qed_int_sp_sb_free(p_hwfn); 2343 qed_int_sb_attn_free(p_hwfn); 2344 qed_int_sp_dpc_free(p_hwfn); 2345 } 2346 2347 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2348 { 2349 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2350 qed_int_sb_attn_setup(p_hwfn, p_ptt); 2351 qed_int_sp_dpc_setup(p_hwfn); 2352 } 2353 2354 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 2355 struct qed_sb_cnt_info *p_sb_cnt_info) 2356 { 2357 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 2358 2359 if (!info || !p_sb_cnt_info) 2360 return; 2361 2362 memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); 2363 } 2364 2365 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 2366 { 2367 int i; 2368 2369 for_each_hwfn(cdev, i) 2370 cdev->hwfns[i].b_int_requested = false; 2371 } 2372 2373 void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable) 2374 { 2375 cdev->attn_clr_en = clr_enable; 2376 } 2377 2378 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2379 u8 timer_res, u16 sb_id, bool tx) 2380 { 2381 struct cau_sb_entry sb_entry; 2382 int rc; 2383 2384 if (!p_hwfn->hw_init_done) { 2385 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2386 return -EINVAL; 2387 } 2388 2389 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2390 sb_id * sizeof(u64), 2391 (u64)(uintptr_t)&sb_entry, 2, NULL); 2392 if (rc) { 2393 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2394 return rc; 2395 } 2396 2397 if (tx) 2398 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2399 else 2400 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2401 2402 rc = qed_dmae_host2grc(p_hwfn, p_ptt, 2403 (u64)(uintptr_t)&sb_entry, 2404 CAU_REG_SB_VAR_MEMORY + 2405 sb_id * sizeof(u64), 2, NULL); 2406 if (rc) { 2407 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2408 return rc; 2409 } 2410 2411 return rc; 2412 } 2413