1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/bitops.h> 37 #include <linux/delay.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/errno.h> 40 #include <linux/interrupt.h> 41 #include <linux/kernel.h> 42 #include <linux/pci.h> 43 #include <linux/slab.h> 44 #include <linux/string.h> 45 #include "qed.h" 46 #include "qed_hsi.h" 47 #include "qed_hw.h" 48 #include "qed_init_ops.h" 49 #include "qed_int.h" 50 #include "qed_mcp.h" 51 #include "qed_reg_addr.h" 52 #include "qed_sp.h" 53 #include "qed_sriov.h" 54 #include "qed_vf.h" 55 56 struct qed_pi_info { 57 qed_int_comp_cb_t comp_cb; 58 void *cookie; 59 }; 60 61 struct qed_sb_sp_info { 62 struct qed_sb_info sb_info; 63 64 /* per protocol index data */ 65 struct qed_pi_info pi_info_arr[PIS_PER_SB]; 66 }; 67 68 enum qed_attention_type { 69 QED_ATTN_TYPE_ATTN, 70 QED_ATTN_TYPE_PARITY, 71 }; 72 73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 75 76 struct aeu_invert_reg_bit { 77 char bit_name[30]; 78 79 #define ATTENTION_PARITY (1 << 0) 80 81 #define ATTENTION_LENGTH_MASK (0x00000ff0) 82 #define ATTENTION_LENGTH_SHIFT (4) 83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 84 ATTENTION_LENGTH_SHIFT) 85 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 88 ATTENTION_PARITY) 89 90 /* Multiple bits start with this offset */ 91 #define ATTENTION_OFFSET_MASK (0x000ff000) 92 #define ATTENTION_OFFSET_SHIFT (12) 93 94 #define ATTENTION_BB_MASK (0x00700000) 95 #define ATTENTION_BB_SHIFT (20) 96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 97 #define ATTENTION_BB_DIFFERENT BIT(23) 98 99 unsigned int flags; 100 101 /* Callback to call if attention will be triggered */ 102 int (*cb)(struct qed_hwfn *p_hwfn); 103 104 enum block_id block_index; 105 }; 106 107 struct aeu_invert_reg { 108 struct aeu_invert_reg_bit bits[32]; 109 }; 110 111 #define MAX_ATTN_GRPS (8) 112 #define NUM_ATTN_REGS (9) 113 114 /* Specific HW attention callbacks */ 115 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 116 { 117 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 118 119 /* This might occur on certain instances; Log it once then mask it */ 120 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 121 tmp); 122 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 123 0xffffffff); 124 125 return 0; 126 } 127 128 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 129 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 130 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 131 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 133 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 135 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 137 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 139 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 141 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 142 { 143 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 144 PSWHST_REG_INCORRECT_ACCESS_VALID); 145 146 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 147 u32 addr, data, length; 148 149 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 150 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 151 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 152 PSWHST_REG_INCORRECT_ACCESS_DATA); 153 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 154 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 155 156 DP_INFO(p_hwfn->cdev, 157 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 158 addr, length, 159 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 161 (u8) GET_FIELD(data, 162 ATTENTION_INCORRECT_ACCESS_VF_VALID), 163 (u8) GET_FIELD(data, 164 ATTENTION_INCORRECT_ACCESS_CLIENT), 165 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 166 (u8) GET_FIELD(data, 167 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 168 data); 169 } 170 171 return 0; 172 } 173 174 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 175 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 176 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 177 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 178 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 179 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 180 #define QED_GRC_ATTENTION_PF_MASK (0xf) 181 #define QED_GRC_ATTENTION_PF_SHIFT (0) 182 #define QED_GRC_ATTENTION_VF_MASK (0xff) 183 #define QED_GRC_ATTENTION_VF_SHIFT (4) 184 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 185 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 186 #define QED_GRC_ATTENTION_PRIV_VF (0) 187 static const char *attn_master_to_str(u8 master) 188 { 189 switch (master) { 190 case 1: return "PXP"; 191 case 2: return "MCP"; 192 case 3: return "MSDM"; 193 case 4: return "PSDM"; 194 case 5: return "YSDM"; 195 case 6: return "USDM"; 196 case 7: return "TSDM"; 197 case 8: return "XSDM"; 198 case 9: return "DBU"; 199 case 10: return "DMAE"; 200 default: 201 return "Unknown"; 202 } 203 } 204 205 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 206 { 207 u32 tmp, tmp2; 208 209 /* We've already cleared the timeout interrupt register, so we learn 210 * of interrupts via the validity register 211 */ 212 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 213 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 214 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 215 goto out; 216 217 /* Read the GRC timeout information */ 218 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 219 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 220 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 221 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 222 223 DP_INFO(p_hwfn->cdev, 224 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 225 tmp2, tmp, 226 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 227 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", 232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 233 234 out: 235 /* Regardles of anything else, clean the validity bit */ 236 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 237 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 238 return 0; 239 } 240 241 #define PGLUE_ATTENTION_VALID (1 << 29) 242 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 243 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 244 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 245 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 247 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 248 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 249 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 251 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 252 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 253 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 255 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 256 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 257 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 258 static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) 259 { 260 u32 tmp; 261 262 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 263 PGLUE_B_REG_TX_ERR_WR_DETAILS2); 264 if (tmp & PGLUE_ATTENTION_VALID) { 265 u32 addr_lo, addr_hi, details; 266 267 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 268 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 269 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 270 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 271 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 272 PGLUE_B_REG_TX_ERR_WR_DETAILS); 273 274 DP_INFO(p_hwfn, 275 "Illegal write by chip to [%08x:%08x] blocked.\n" 276 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 277 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 278 addr_hi, addr_lo, details, 279 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 280 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 281 GET_FIELD(details, 282 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 283 tmp, 284 GET_FIELD(tmp, 285 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 286 GET_FIELD(tmp, 287 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 288 GET_FIELD(tmp, 289 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 290 } 291 292 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 293 PGLUE_B_REG_TX_ERR_RD_DETAILS2); 294 if (tmp & PGLUE_ATTENTION_RD_VALID) { 295 u32 addr_lo, addr_hi, details; 296 297 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 298 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 299 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 300 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 301 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 302 PGLUE_B_REG_TX_ERR_RD_DETAILS); 303 304 DP_INFO(p_hwfn, 305 "Illegal read by chip from [%08x:%08x] blocked.\n" 306 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 307 " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 308 addr_hi, addr_lo, details, 309 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 310 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 311 GET_FIELD(details, 312 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 313 tmp, 314 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 315 : 0, 316 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 317 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 318 : 0); 319 } 320 321 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 322 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 323 if (tmp & PGLUE_ATTENTION_ICPL_VALID) 324 DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp); 325 326 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 327 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 328 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 329 u32 addr_hi, addr_lo; 330 331 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 332 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 333 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 334 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 335 336 DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", 337 tmp, addr_hi, addr_lo); 338 } 339 340 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 341 PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 342 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 343 u32 addr_hi, addr_lo, details; 344 345 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 346 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 347 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 348 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 349 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 350 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 351 352 DP_INFO(p_hwfn, 353 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 354 details, tmp, addr_hi, addr_lo); 355 } 356 357 /* Clear the indications */ 358 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 359 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 360 361 return 0; 362 } 363 364 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 365 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 366 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 367 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 368 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 369 { 370 u32 reason; 371 372 reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & 373 QED_DORQ_ATTENTION_REASON_MASK; 374 if (reason) { 375 u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 376 DORQ_REG_DB_DROP_DETAILS); 377 378 DP_INFO(p_hwfn->cdev, 379 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", 380 qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 381 DORQ_REG_DB_DROP_DETAILS_ADDRESS), 382 (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), 383 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 384 reason); 385 } 386 387 return -EINVAL; 388 } 389 390 /* Instead of major changes to the data-structure, we have a some 'special' 391 * identifiers for sources that changed meaning between adapters. 392 */ 393 enum aeu_invert_reg_special_type { 394 AEU_INVERT_REG_SPECIAL_CNIG_0, 395 AEU_INVERT_REG_SPECIAL_CNIG_1, 396 AEU_INVERT_REG_SPECIAL_CNIG_2, 397 AEU_INVERT_REG_SPECIAL_CNIG_3, 398 AEU_INVERT_REG_SPECIAL_MAX, 399 }; 400 401 static struct aeu_invert_reg_bit 402 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 403 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 404 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 405 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 406 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 407 }; 408 409 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 410 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 411 { 412 { /* After Invert 1 */ 413 {"GPIO0 function%d", 414 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 415 } 416 }, 417 418 { 419 { /* After Invert 2 */ 420 {"PGLUE config_space", ATTENTION_SINGLE, 421 NULL, MAX_BLOCK_ID}, 422 {"PGLUE misc_flr", ATTENTION_SINGLE, 423 NULL, MAX_BLOCK_ID}, 424 {"PGLUE B RBC", ATTENTION_PAR_INT, 425 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, 426 {"PGLUE misc_mctp", ATTENTION_SINGLE, 427 NULL, MAX_BLOCK_ID}, 428 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 429 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 430 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 431 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 432 (1 << ATTENTION_OFFSET_SHIFT), 433 NULL, MAX_BLOCK_ID}, 434 {"PCIE glue/PXP VPD %d", 435 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 436 } 437 }, 438 439 { 440 { /* After Invert 3 */ 441 {"General Attention %d", 442 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 443 } 444 }, 445 446 { 447 { /* After Invert 4 */ 448 {"General Attention 32", ATTENTION_SINGLE, 449 NULL, MAX_BLOCK_ID}, 450 {"General Attention %d", 451 (2 << ATTENTION_LENGTH_SHIFT) | 452 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 453 {"General Attention 35", ATTENTION_SINGLE, 454 NULL, MAX_BLOCK_ID}, 455 {"NWS Parity", 456 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 457 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 458 NULL, BLOCK_NWS}, 459 {"NWS Interrupt", 460 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 461 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 462 NULL, BLOCK_NWS}, 463 {"NWM Parity", 464 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 465 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 466 NULL, BLOCK_NWM}, 467 {"NWM Interrupt", 468 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 469 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 470 NULL, BLOCK_NWM}, 471 {"MCP CPU", ATTENTION_SINGLE, 472 qed_mcp_attn_cb, MAX_BLOCK_ID}, 473 {"MCP Watchdog timer", ATTENTION_SINGLE, 474 NULL, MAX_BLOCK_ID}, 475 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 476 {"AVS stop status ready", ATTENTION_SINGLE, 477 NULL, MAX_BLOCK_ID}, 478 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 479 {"MSTAT per-path", ATTENTION_PAR_INT, 480 NULL, MAX_BLOCK_ID}, 481 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 482 NULL, MAX_BLOCK_ID}, 483 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 484 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 485 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 486 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 487 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 488 } 489 }, 490 491 { 492 { /* After Invert 5 */ 493 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 494 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 495 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 496 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 497 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 498 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 499 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 500 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 501 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 502 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 503 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 504 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 505 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 506 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 507 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 508 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 509 } 510 }, 511 512 { 513 { /* After Invert 6 */ 514 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 515 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 516 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 517 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 518 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 519 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 520 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 521 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, 522 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 523 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 524 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 525 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 526 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 527 {"DORQ", ATTENTION_PAR_INT, 528 qed_dorq_attn_cb, BLOCK_DORQ}, 529 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 530 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 531 } 532 }, 533 534 { 535 { /* After Invert 7 */ 536 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 537 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 538 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 539 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 540 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 541 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 542 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 543 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 544 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 545 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 546 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 547 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 548 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 549 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 550 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 551 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 552 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 553 } 554 }, 555 556 { 557 { /* After Invert 8 */ 558 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 559 NULL, BLOCK_PSWRQ2}, 560 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 561 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 562 NULL, BLOCK_PSWWR2}, 563 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 564 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 565 NULL, BLOCK_PSWRD2}, 566 {"PSWHST", ATTENTION_PAR_INT, 567 qed_pswhst_attn_cb, BLOCK_PSWHST}, 568 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 569 NULL, BLOCK_PSWHST2}, 570 {"GRC", ATTENTION_PAR_INT, 571 qed_grc_attn_cb, BLOCK_GRC}, 572 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 573 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 574 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 575 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 576 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 577 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 578 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 579 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 580 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 581 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, 582 NULL, BLOCK_PGLCS}, 583 {"PERST_B assertion", ATTENTION_SINGLE, 584 NULL, MAX_BLOCK_ID}, 585 {"PERST_B deassertion", ATTENTION_SINGLE, 586 NULL, MAX_BLOCK_ID}, 587 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 588 NULL, MAX_BLOCK_ID}, 589 } 590 }, 591 592 { 593 { /* After Invert 9 */ 594 {"MCP Latched memory", ATTENTION_PAR, 595 NULL, MAX_BLOCK_ID}, 596 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 597 NULL, MAX_BLOCK_ID}, 598 {"MCP Latched ump_tx", ATTENTION_PAR, 599 NULL, MAX_BLOCK_ID}, 600 {"MCP Latched scratchpad", ATTENTION_PAR, 601 NULL, MAX_BLOCK_ID}, 602 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 603 NULL, MAX_BLOCK_ID}, 604 } 605 }, 606 }; 607 608 static struct aeu_invert_reg_bit * 609 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 610 struct aeu_invert_reg_bit *p_bit) 611 { 612 if (!QED_IS_BB(p_hwfn->cdev)) 613 return p_bit; 614 615 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 616 return p_bit; 617 618 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 619 ATTENTION_BB_SHIFT]; 620 } 621 622 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 623 struct aeu_invert_reg_bit *p_bit) 624 { 625 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 626 ATTENTION_PARITY); 627 } 628 629 #define ATTN_STATE_BITS (0xfff) 630 #define ATTN_BITS_MASKABLE (0x3ff) 631 struct qed_sb_attn_info { 632 /* Virtual & Physical address of the SB */ 633 struct atten_status_block *sb_attn; 634 dma_addr_t sb_phys; 635 636 /* Last seen running index */ 637 u16 index; 638 639 /* A mask of the AEU bits resulting in a parity error */ 640 u32 parity_mask[NUM_ATTN_REGS]; 641 642 /* A pointer to the attention description structure */ 643 struct aeu_invert_reg *p_aeu_desc; 644 645 /* Previously asserted attentions, which are still unasserted */ 646 u16 known_attn; 647 648 /* Cleanup address for the link's general hw attention */ 649 u32 mfw_attn_addr; 650 }; 651 652 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 653 struct qed_sb_attn_info *p_sb_desc) 654 { 655 u16 rc = 0, index; 656 657 /* Make certain HW write took affect */ 658 mmiowb(); 659 660 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 661 if (p_sb_desc->index != index) { 662 p_sb_desc->index = index; 663 rc = QED_SB_ATT_IDX; 664 } 665 666 /* Make certain we got a consistent view with HW */ 667 mmiowb(); 668 669 return rc; 670 } 671 672 /** 673 * @brief qed_int_assertion - handles asserted attention bits 674 * 675 * @param p_hwfn 676 * @param asserted_bits newly asserted bits 677 * @return int 678 */ 679 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) 680 { 681 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 682 u32 igu_mask; 683 684 /* Mask the source of the attention in the IGU */ 685 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 686 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 687 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 688 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 689 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 690 691 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 692 "inner known ATTN state: 0x%04x --> 0x%04x\n", 693 sb_attn_sw->known_attn, 694 sb_attn_sw->known_attn | asserted_bits); 695 sb_attn_sw->known_attn |= asserted_bits; 696 697 /* Handle MCP events */ 698 if (asserted_bits & 0x100) { 699 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 700 /* Clean the MCP attention */ 701 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 702 sb_attn_sw->mfw_attn_addr, 0); 703 } 704 705 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 706 GTT_BAR0_MAP_REG_IGU_CMD + 707 ((IGU_CMD_ATTN_BIT_SET_UPPER - 708 IGU_CMD_INT_ACK_BASE) << 3), 709 (u32)asserted_bits); 710 711 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 712 asserted_bits); 713 714 return 0; 715 } 716 717 static void qed_int_attn_print(struct qed_hwfn *p_hwfn, 718 enum block_id id, 719 enum dbg_attn_type type, bool b_clear) 720 { 721 struct dbg_attn_block_result attn_results; 722 enum dbg_status status; 723 724 memset(&attn_results, 0, sizeof(attn_results)); 725 726 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 727 b_clear, &attn_results); 728 if (status != DBG_STATUS_OK) 729 DP_NOTICE(p_hwfn, 730 "Failed to parse attention information [status: %s]\n", 731 qed_dbg_get_status_str(status)); 732 else 733 qed_dbg_parse_attn(p_hwfn, &attn_results); 734 } 735 736 /** 737 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single 738 * cause of the attention 739 * 740 * @param p_hwfn 741 * @param p_aeu - descriptor of an AEU bit which caused the attention 742 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 743 * this bit to this group. 744 * @param bit_index - index of this bit in the aeu_en_reg 745 * 746 * @return int 747 */ 748 static int 749 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, 750 struct aeu_invert_reg_bit *p_aeu, 751 u32 aeu_en_reg, 752 u32 bitmask) 753 { 754 bool b_fatal = false; 755 int rc = -EINVAL; 756 u32 val; 757 758 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 759 p_aeu->bit_name, bitmask); 760 761 /* Call callback before clearing the interrupt status */ 762 if (p_aeu->cb) { 763 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 764 p_aeu->bit_name); 765 rc = p_aeu->cb(p_hwfn); 766 } 767 768 if (rc) 769 b_fatal = true; 770 771 /* Print HW block interrupt registers */ 772 if (p_aeu->block_index != MAX_BLOCK_ID) 773 qed_int_attn_print(p_hwfn, p_aeu->block_index, 774 ATTN_TYPE_INTERRUPT, !b_fatal); 775 776 777 /* If the attention is benign, no need to prevent it */ 778 if (!rc) 779 goto out; 780 781 /* Prevent this Attention from being asserted in the future */ 782 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 783 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 784 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 785 p_aeu->bit_name); 786 787 out: 788 return rc; 789 } 790 791 /** 792 * @brief qed_int_deassertion_parity - handle a single parity AEU source 793 * 794 * @param p_hwfn 795 * @param p_aeu - descriptor of an AEU bit which caused the parity 796 * @param bit_index 797 */ 798 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 799 struct aeu_invert_reg_bit *p_aeu, 800 u8 bit_index) 801 { 802 u32 block_id = p_aeu->block_index; 803 804 DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n", 805 p_aeu->bit_name, bit_index); 806 807 if (block_id != MAX_BLOCK_ID) { 808 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 809 810 /* In BB, there's a single parity bit for several blocks */ 811 if (block_id == BLOCK_BTB) { 812 qed_int_attn_print(p_hwfn, BLOCK_OPTE, 813 ATTN_TYPE_PARITY, false); 814 qed_int_attn_print(p_hwfn, BLOCK_MCP, 815 ATTN_TYPE_PARITY, false); 816 } 817 } 818 } 819 820 /** 821 * @brief - handles deassertion of previously asserted attentions. 822 * 823 * @param p_hwfn 824 * @param deasserted_bits - newly deasserted bits 825 * @return int 826 * 827 */ 828 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 829 u16 deasserted_bits) 830 { 831 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 832 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; 833 u8 i, j, k, bit_idx; 834 int rc = 0; 835 836 /* Read the attention registers in the AEU */ 837 for (i = 0; i < NUM_ATTN_REGS; i++) { 838 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 839 MISC_REG_AEU_AFTER_INVERT_1_IGU + 840 i * 0x4); 841 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 842 "Deasserted bits [%d]: %08x\n", 843 i, aeu_inv_arr[i]); 844 } 845 846 /* Find parity attentions first */ 847 for (i = 0; i < NUM_ATTN_REGS; i++) { 848 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 849 u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 850 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 851 i * sizeof(u32)); 852 u32 parities; 853 854 /* Skip register in which no parity bit is currently set */ 855 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 856 if (!parities) 857 continue; 858 859 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 860 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 861 862 if (qed_int_is_parity_flag(p_hwfn, p_bit) && 863 !!(parities & BIT(bit_idx))) 864 qed_int_deassertion_parity(p_hwfn, p_bit, 865 bit_idx); 866 867 bit_idx += ATTENTION_LENGTH(p_bit->flags); 868 } 869 } 870 871 /* Find non-parity cause for attention and act */ 872 for (k = 0; k < MAX_ATTN_GRPS; k++) { 873 struct aeu_invert_reg_bit *p_aeu; 874 875 /* Handle only groups whose attention is currently deasserted */ 876 if (!(deasserted_bits & (1 << k))) 877 continue; 878 879 for (i = 0; i < NUM_ATTN_REGS; i++) { 880 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 881 i * sizeof(u32) + 882 k * sizeof(u32) * NUM_ATTN_REGS; 883 u32 en, bits; 884 885 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 886 bits = aeu_inv_arr[i] & en; 887 888 /* Skip if no bit from this group is currently set */ 889 if (!bits) 890 continue; 891 892 /* Find all set bits from current register which belong 893 * to current group, making them responsible for the 894 * previous assertion. 895 */ 896 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 897 u8 bit, bit_len; 898 u32 bitmask; 899 900 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 901 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); 902 903 bit = bit_idx; 904 bit_len = ATTENTION_LENGTH(p_aeu->flags); 905 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { 906 /* Skip Parity */ 907 bit++; 908 bit_len--; 909 } 910 911 bitmask = bits & (((1 << bit_len) - 1) << bit); 912 if (bitmask) { 913 /* Handle source of the attention */ 914 qed_int_deassertion_aeu_bit(p_hwfn, 915 p_aeu, 916 aeu_en, 917 bitmask); 918 } 919 920 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 921 } 922 } 923 } 924 925 /* Clear IGU indication for the deasserted bits */ 926 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 927 GTT_BAR0_MAP_REG_IGU_CMD + 928 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 929 IGU_CMD_INT_ACK_BASE) << 3), 930 ~((u32)deasserted_bits)); 931 932 /* Unmask deasserted attentions in IGU */ 933 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 934 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 935 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 936 937 /* Clear deassertion from inner state */ 938 sb_attn_sw->known_attn &= ~deasserted_bits; 939 940 return rc; 941 } 942 943 static int qed_int_attentions(struct qed_hwfn *p_hwfn) 944 { 945 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 946 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 947 u32 attn_bits = 0, attn_acks = 0; 948 u16 asserted_bits, deasserted_bits; 949 __le16 index; 950 int rc = 0; 951 952 /* Read current attention bits/acks - safeguard against attentions 953 * by guaranting work on a synchronized timeframe 954 */ 955 do { 956 index = p_sb_attn->sb_index; 957 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 958 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 959 } while (index != p_sb_attn->sb_index); 960 p_sb_attn->sb_index = index; 961 962 /* Attention / Deassertion are meaningful (and in correct state) 963 * only when they differ and consistent with known state - deassertion 964 * when previous attention & current ack, and assertion when current 965 * attention with no previous attention 966 */ 967 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 968 ~p_sb_attn_sw->known_attn; 969 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 970 p_sb_attn_sw->known_attn; 971 972 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { 973 DP_INFO(p_hwfn, 974 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 975 index, attn_bits, attn_acks, asserted_bits, 976 deasserted_bits, p_sb_attn_sw->known_attn); 977 } else if (asserted_bits == 0x100) { 978 DP_INFO(p_hwfn, "MFW indication via attention\n"); 979 } else { 980 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 981 "MFW indication [deassertion]\n"); 982 } 983 984 if (asserted_bits) { 985 rc = qed_int_assertion(p_hwfn, asserted_bits); 986 if (rc) 987 return rc; 988 } 989 990 if (deasserted_bits) 991 rc = qed_int_deassertion(p_hwfn, deasserted_bits); 992 993 return rc; 994 } 995 996 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, 997 void __iomem *igu_addr, u32 ack_cons) 998 { 999 struct igu_prod_cons_update igu_ack = { 0 }; 1000 1001 igu_ack.sb_id_and_flags = 1002 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1003 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1004 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1005 (IGU_SEG_ACCESS_ATTN << 1006 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1007 1008 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); 1009 1010 /* Both segments (interrupts & acks) are written to same place address; 1011 * Need to guarantee all commands will be received (in-order) by HW. 1012 */ 1013 mmiowb(); 1014 barrier(); 1015 } 1016 1017 void qed_int_sp_dpc(unsigned long hwfn_cookie) 1018 { 1019 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; 1020 struct qed_pi_info *pi_info = NULL; 1021 struct qed_sb_attn_info *sb_attn; 1022 struct qed_sb_info *sb_info; 1023 int arr_size; 1024 u16 rc = 0; 1025 1026 if (!p_hwfn->p_sp_sb) { 1027 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 1028 return; 1029 } 1030 1031 sb_info = &p_hwfn->p_sp_sb->sb_info; 1032 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1033 if (!sb_info) { 1034 DP_ERR(p_hwfn->cdev, 1035 "Status block is NULL - cannot ack interrupts\n"); 1036 return; 1037 } 1038 1039 if (!p_hwfn->p_sb_attn) { 1040 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); 1041 return; 1042 } 1043 sb_attn = p_hwfn->p_sb_attn; 1044 1045 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1046 p_hwfn, p_hwfn->my_id); 1047 1048 /* Disable ack for def status block. Required both for msix + 1049 * inta in non-mask mode, in inta does no harm. 1050 */ 1051 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1052 1053 /* Gather Interrupts/Attentions information */ 1054 if (!sb_info->sb_virt) { 1055 DP_ERR(p_hwfn->cdev, 1056 "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1057 } else { 1058 u32 tmp_index = sb_info->sb_ack; 1059 1060 rc = qed_sb_update_sb_idx(sb_info); 1061 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1062 "Interrupt indices: 0x%08x --> 0x%08x\n", 1063 tmp_index, sb_info->sb_ack); 1064 } 1065 1066 if (!sb_attn || !sb_attn->sb_attn) { 1067 DP_ERR(p_hwfn->cdev, 1068 "Attentions Status block is NULL - cannot check for new attentions!\n"); 1069 } else { 1070 u16 tmp_index = sb_attn->index; 1071 1072 rc |= qed_attn_update_idx(p_hwfn, sb_attn); 1073 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1074 "Attention indices: 0x%08x --> 0x%08x\n", 1075 tmp_index, sb_attn->index); 1076 } 1077 1078 /* Check if we expect interrupts at this time. if not just ack them */ 1079 if (!(rc & QED_SB_EVENT_MASK)) { 1080 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1081 return; 1082 } 1083 1084 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1085 if (!p_hwfn->p_dpc_ptt) { 1086 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); 1087 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1088 return; 1089 } 1090 1091 if (rc & QED_SB_ATT_IDX) 1092 qed_int_attentions(p_hwfn); 1093 1094 if (rc & QED_SB_IDX) { 1095 int pi; 1096 1097 /* Look for a free index */ 1098 for (pi = 0; pi < arr_size; pi++) { 1099 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1100 if (pi_info->comp_cb) 1101 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1102 } 1103 } 1104 1105 if (sb_attn && (rc & QED_SB_ATT_IDX)) 1106 /* This should be done before the interrupts are enabled, 1107 * since otherwise a new attention will be generated. 1108 */ 1109 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1110 1111 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1112 } 1113 1114 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) 1115 { 1116 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1117 1118 if (!p_sb) 1119 return; 1120 1121 if (p_sb->sb_attn) 1122 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1123 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1124 p_sb->sb_attn, p_sb->sb_phys); 1125 kfree(p_sb); 1126 p_hwfn->p_sb_attn = NULL; 1127 } 1128 1129 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, 1130 struct qed_ptt *p_ptt) 1131 { 1132 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1133 1134 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1135 1136 sb_info->index = 0; 1137 sb_info->known_attn = 0; 1138 1139 /* Configure Attention Status Block in IGU */ 1140 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1141 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1142 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1143 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1144 } 1145 1146 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, 1147 struct qed_ptt *p_ptt, 1148 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1149 { 1150 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1151 int i, j, k; 1152 1153 sb_info->sb_attn = sb_virt_addr; 1154 sb_info->sb_phys = sb_phy_addr; 1155 1156 /* Set the pointer to the AEU descriptors */ 1157 sb_info->p_aeu_desc = aeu_descs; 1158 1159 /* Calculate Parity Masks */ 1160 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1161 for (i = 0; i < NUM_ATTN_REGS; i++) { 1162 /* j is array index, k is bit index */ 1163 for (j = 0, k = 0; k < 32; j++) { 1164 struct aeu_invert_reg_bit *p_aeu; 1165 1166 p_aeu = &aeu_descs[i].bits[j]; 1167 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) 1168 sb_info->parity_mask[i] |= 1 << k; 1169 1170 k += ATTENTION_LENGTH(p_aeu->flags); 1171 } 1172 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1173 "Attn Mask [Reg %d]: 0x%08x\n", 1174 i, sb_info->parity_mask[i]); 1175 } 1176 1177 /* Set the address of cleanup for the mcp attention */ 1178 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1179 MISC_REG_AEU_GENERAL_ATTN_0; 1180 1181 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1182 } 1183 1184 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, 1185 struct qed_ptt *p_ptt) 1186 { 1187 struct qed_dev *cdev = p_hwfn->cdev; 1188 struct qed_sb_attn_info *p_sb; 1189 dma_addr_t p_phys = 0; 1190 void *p_virt; 1191 1192 /* SB struct */ 1193 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1194 if (!p_sb) 1195 return -ENOMEM; 1196 1197 /* SB ring */ 1198 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 1199 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1200 &p_phys, GFP_KERNEL); 1201 1202 if (!p_virt) { 1203 kfree(p_sb); 1204 return -ENOMEM; 1205 } 1206 1207 /* Attention setup */ 1208 p_hwfn->p_sb_attn = p_sb; 1209 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1210 1211 return 0; 1212 } 1213 1214 /* coalescing timeout = timeset << (timer_res + 1) */ 1215 #define QED_CAU_DEF_RX_USECS 24 1216 #define QED_CAU_DEF_TX_USECS 48 1217 1218 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, 1219 struct cau_sb_entry *p_sb_entry, 1220 u8 pf_id, u16 vf_number, u8 vf_valid) 1221 { 1222 struct qed_dev *cdev = p_hwfn->cdev; 1223 u32 cau_state; 1224 u8 timer_res; 1225 1226 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 1227 1228 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1229 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1230 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1231 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1232 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1233 1234 cau_state = CAU_HC_DISABLE_STATE; 1235 1236 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1237 cau_state = CAU_HC_ENABLE_STATE; 1238 if (!cdev->rx_coalesce_usecs) 1239 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; 1240 if (!cdev->tx_coalesce_usecs) 1241 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 1242 } 1243 1244 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1245 if (cdev->rx_coalesce_usecs <= 0x7F) 1246 timer_res = 0; 1247 else if (cdev->rx_coalesce_usecs <= 0xFF) 1248 timer_res = 1; 1249 else 1250 timer_res = 2; 1251 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1252 1253 if (cdev->tx_coalesce_usecs <= 0x7F) 1254 timer_res = 0; 1255 else if (cdev->tx_coalesce_usecs <= 0xFF) 1256 timer_res = 1; 1257 else 1258 timer_res = 2; 1259 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1260 1261 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1262 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1263 } 1264 1265 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, 1266 struct qed_ptt *p_ptt, 1267 dma_addr_t sb_phys, 1268 u16 igu_sb_id, u16 vf_number, u8 vf_valid) 1269 { 1270 struct cau_sb_entry sb_entry; 1271 1272 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1273 vf_number, vf_valid); 1274 1275 if (p_hwfn->hw_init_done) { 1276 /* Wide-bus, initialize via DMAE */ 1277 u64 phys_addr = (u64)sb_phys; 1278 1279 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, 1280 CAU_REG_SB_ADDR_MEMORY + 1281 igu_sb_id * sizeof(u64), 2, 0); 1282 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, 1283 CAU_REG_SB_VAR_MEMORY + 1284 igu_sb_id * sizeof(u64), 2, 0); 1285 } else { 1286 /* Initialize Status Block Address */ 1287 STORE_RT_REG_AGG(p_hwfn, 1288 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1289 igu_sb_id * 2, 1290 sb_phys); 1291 1292 STORE_RT_REG_AGG(p_hwfn, 1293 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1294 igu_sb_id * 2, 1295 sb_entry); 1296 } 1297 1298 /* Configure pi coalescing if set */ 1299 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1300 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1301 u8 timeset, timer_res; 1302 u8 i; 1303 1304 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1305 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) 1306 timer_res = 0; 1307 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) 1308 timer_res = 1; 1309 else 1310 timer_res = 2; 1311 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); 1312 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1313 QED_COAL_RX_STATE_MACHINE, timeset); 1314 1315 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) 1316 timer_res = 0; 1317 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) 1318 timer_res = 1; 1319 else 1320 timer_res = 2; 1321 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); 1322 for (i = 0; i < num_tc; i++) { 1323 qed_int_cau_conf_pi(p_hwfn, p_ptt, 1324 igu_sb_id, TX_PI(i), 1325 QED_COAL_TX_STATE_MACHINE, 1326 timeset); 1327 } 1328 } 1329 } 1330 1331 void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, 1332 struct qed_ptt *p_ptt, 1333 u16 igu_sb_id, 1334 u32 pi_index, 1335 enum qed_coalescing_fsm coalescing_fsm, 1336 u8 timeset) 1337 { 1338 struct cau_pi_entry pi_entry; 1339 u32 sb_offset, pi_offset; 1340 1341 if (IS_VF(p_hwfn->cdev)) 1342 return; 1343 1344 sb_offset = igu_sb_id * PIS_PER_SB; 1345 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1346 1347 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1348 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 1349 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1350 else 1351 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1352 1353 pi_offset = sb_offset + pi_index; 1354 if (p_hwfn->hw_init_done) { 1355 qed_wr(p_hwfn, p_ptt, 1356 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1357 *((u32 *)&(pi_entry))); 1358 } else { 1359 STORE_RT_REG(p_hwfn, 1360 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1361 *((u32 *)&(pi_entry))); 1362 } 1363 } 1364 1365 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 1366 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) 1367 { 1368 /* zero status block and ack counter */ 1369 sb_info->sb_ack = 0; 1370 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1371 1372 if (IS_PF(p_hwfn->cdev)) 1373 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1374 sb_info->igu_sb_id, 0, 0); 1375 } 1376 1377 /** 1378 * @brief qed_get_igu_sb_id - given a sw sb_id return the 1379 * igu_sb_id 1380 * 1381 * @param p_hwfn 1382 * @param sb_id 1383 * 1384 * @return u16 1385 */ 1386 static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 1387 { 1388 u16 igu_sb_id; 1389 1390 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1391 if (sb_id == QED_SP_SB_ID) 1392 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1393 else if (IS_PF(p_hwfn->cdev)) 1394 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; 1395 else 1396 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 1397 1398 if (sb_id == QED_SP_SB_ID) 1399 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1400 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1401 else 1402 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1403 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1404 1405 return igu_sb_id; 1406 } 1407 1408 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 1409 struct qed_ptt *p_ptt, 1410 struct qed_sb_info *sb_info, 1411 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) 1412 { 1413 sb_info->sb_virt = sb_virt_addr; 1414 sb_info->sb_phys = sb_phy_addr; 1415 1416 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 1417 1418 if (sb_id != QED_SP_SB_ID) { 1419 p_hwfn->sbs_info[sb_id] = sb_info; 1420 p_hwfn->num_sbs++; 1421 } 1422 1423 sb_info->cdev = p_hwfn->cdev; 1424 1425 /* The igu address will hold the absolute address that needs to be 1426 * written to for a specific status block 1427 */ 1428 if (IS_PF(p_hwfn->cdev)) { 1429 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1430 GTT_BAR0_MAP_REG_IGU_CMD + 1431 (sb_info->igu_sb_id << 3); 1432 } else { 1433 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1434 PXP_VF_BAR0_START_IGU + 1435 ((IGU_CMD_INT_ACK_BASE + 1436 sb_info->igu_sb_id) << 3); 1437 } 1438 1439 sb_info->flags |= QED_SB_INFO_INIT; 1440 1441 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 1442 1443 return 0; 1444 } 1445 1446 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 1447 struct qed_sb_info *sb_info, u16 sb_id) 1448 { 1449 if (sb_id == QED_SP_SB_ID) { 1450 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1451 return -EINVAL; 1452 } 1453 1454 /* zero status block and ack counter */ 1455 sb_info->sb_ack = 0; 1456 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1457 1458 if (p_hwfn->sbs_info[sb_id] != NULL) { 1459 p_hwfn->sbs_info[sb_id] = NULL; 1460 p_hwfn->num_sbs--; 1461 } 1462 1463 return 0; 1464 } 1465 1466 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 1467 { 1468 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1469 1470 if (!p_sb) 1471 return; 1472 1473 if (p_sb->sb_info.sb_virt) 1474 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1475 SB_ALIGNED_SIZE(p_hwfn), 1476 p_sb->sb_info.sb_virt, 1477 p_sb->sb_info.sb_phys); 1478 kfree(p_sb); 1479 p_hwfn->p_sp_sb = NULL; 1480 } 1481 1482 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1483 { 1484 struct qed_sb_sp_info *p_sb; 1485 dma_addr_t p_phys = 0; 1486 void *p_virt; 1487 1488 /* SB struct */ 1489 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1490 if (!p_sb) 1491 return -ENOMEM; 1492 1493 /* SB ring */ 1494 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1495 SB_ALIGNED_SIZE(p_hwfn), 1496 &p_phys, GFP_KERNEL); 1497 if (!p_virt) { 1498 kfree(p_sb); 1499 return -ENOMEM; 1500 } 1501 1502 /* Status Block setup */ 1503 p_hwfn->p_sp_sb = p_sb; 1504 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 1505 p_phys, QED_SP_SB_ID); 1506 1507 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1508 1509 return 0; 1510 } 1511 1512 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 1513 qed_int_comp_cb_t comp_cb, 1514 void *cookie, u8 *sb_idx, __le16 **p_fw_cons) 1515 { 1516 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1517 int rc = -ENOMEM; 1518 u8 pi; 1519 1520 /* Look for a free index */ 1521 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1522 if (p_sp_sb->pi_info_arr[pi].comp_cb) 1523 continue; 1524 1525 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1526 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1527 *sb_idx = pi; 1528 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1529 rc = 0; 1530 break; 1531 } 1532 1533 return rc; 1534 } 1535 1536 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 1537 { 1538 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1539 1540 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 1541 return -ENOMEM; 1542 1543 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 1544 p_sp_sb->pi_info_arr[pi].cookie = NULL; 1545 1546 return 0; 1547 } 1548 1549 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 1550 { 1551 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1552 } 1553 1554 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 1555 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1556 { 1557 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1558 1559 p_hwfn->cdev->int_mode = int_mode; 1560 switch (p_hwfn->cdev->int_mode) { 1561 case QED_INT_MODE_INTA: 1562 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1563 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1564 break; 1565 1566 case QED_INT_MODE_MSI: 1567 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1568 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1569 break; 1570 1571 case QED_INT_MODE_MSIX: 1572 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1573 break; 1574 case QED_INT_MODE_POLL: 1575 break; 1576 } 1577 1578 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1579 } 1580 1581 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1582 enum qed_int_mode int_mode) 1583 { 1584 int rc = 0; 1585 1586 /* Configure AEU signal change to produce attentions */ 1587 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1588 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1589 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1590 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1591 1592 /* Flush the writes to IGU */ 1593 mmiowb(); 1594 1595 /* Unmask AEU signals toward IGU */ 1596 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1597 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1598 rc = qed_slowpath_irq_req(p_hwfn); 1599 if (rc) { 1600 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 1601 return -EINVAL; 1602 } 1603 p_hwfn->b_int_requested = true; 1604 } 1605 /* Enable interrupt Generation */ 1606 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1607 p_hwfn->b_int_enabled = 1; 1608 1609 return rc; 1610 } 1611 1612 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1613 { 1614 p_hwfn->b_int_enabled = 0; 1615 1616 if (IS_VF(p_hwfn->cdev)) 1617 return; 1618 1619 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1620 } 1621 1622 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1623 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 1624 struct qed_ptt *p_ptt, 1625 u32 sb_id, bool cleanup_set, u16 opaque_fid) 1626 { 1627 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1628 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; 1629 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1630 1631 /* Set the data field */ 1632 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1633 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 1634 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1635 1636 /* Set the control register */ 1637 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1638 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1639 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1640 1641 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1642 1643 barrier(); 1644 1645 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1646 1647 /* Flush the write to IGU */ 1648 mmiowb(); 1649 1650 /* calculate where to read the status bit from */ 1651 sb_bit = 1 << (sb_id % 32); 1652 sb_bit_addr = sb_id / 32 * sizeof(u32); 1653 1654 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 1655 1656 /* Now wait for the command to complete */ 1657 do { 1658 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 1659 1660 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1661 break; 1662 1663 usleep_range(5000, 10000); 1664 } while (--sleep_cnt); 1665 1666 if (!sleep_cnt) 1667 DP_NOTICE(p_hwfn, 1668 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1669 val, sb_id); 1670 } 1671 1672 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 1673 struct qed_ptt *p_ptt, 1674 u32 sb_id, u16 opaque, bool b_set) 1675 { 1676 int pi, i; 1677 1678 /* Set */ 1679 if (b_set) 1680 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); 1681 1682 /* Clear */ 1683 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); 1684 1685 /* Wait for the IGU SB to cleanup */ 1686 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1687 u32 val; 1688 1689 val = qed_rd(p_hwfn, p_ptt, 1690 IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); 1691 if (val & (1 << (sb_id % 32))) 1692 usleep_range(10, 20); 1693 else 1694 break; 1695 } 1696 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1697 DP_NOTICE(p_hwfn, 1698 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1699 sb_id); 1700 1701 /* Clear the CAU for the SB */ 1702 for (pi = 0; pi < 12; pi++) 1703 qed_wr(p_hwfn, p_ptt, 1704 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); 1705 } 1706 1707 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 1708 struct qed_ptt *p_ptt, 1709 bool b_set, bool b_slowpath) 1710 { 1711 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; 1712 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; 1713 u32 sb_id = 0, val = 0; 1714 1715 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 1716 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 1717 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 1718 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 1719 1720 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1721 "IGU cleaning SBs [%d,...,%d]\n", 1722 igu_base_sb, igu_base_sb + igu_sb_cnt - 1); 1723 1724 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) 1725 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 1726 p_hwfn->hw_info.opaque_fid, 1727 b_set); 1728 1729 if (!b_slowpath) 1730 return; 1731 1732 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1733 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1734 "IGU cleaning slowpath SB [%d]\n", sb_id); 1735 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 1736 p_hwfn->hw_info.opaque_fid, b_set); 1737 } 1738 1739 static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 1740 struct qed_ptt *p_ptt, u16 sb_id) 1741 { 1742 u32 val = qed_rd(p_hwfn, p_ptt, 1743 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); 1744 struct qed_igu_block *p_block; 1745 1746 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; 1747 1748 /* stop scanning when hit first invalid PF entry */ 1749 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 1750 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 1751 goto out; 1752 1753 /* Fill the block information */ 1754 p_block->status = QED_IGU_STATUS_VALID; 1755 p_block->function_id = GET_FIELD(val, 1756 IGU_MAPPING_LINE_FUNCTION_NUMBER); 1757 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 1758 p_block->vector_number = GET_FIELD(val, 1759 IGU_MAPPING_LINE_VECTOR_NUMBER); 1760 1761 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1762 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", 1763 sb_id, val, p_block->function_id, 1764 p_block->is_pf, p_block->vector_number); 1765 1766 out: 1767 return val; 1768 } 1769 1770 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1771 { 1772 struct qed_igu_info *p_igu_info; 1773 u32 val, min_vf = 0, max_vf = 0; 1774 u16 sb_id, last_iov_sb_id = 0; 1775 struct qed_igu_block *blk; 1776 u16 prev_sb_id = 0xFF; 1777 1778 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 1779 if (!p_hwfn->hw_info.p_igu_info) 1780 return -ENOMEM; 1781 1782 p_igu_info = p_hwfn->hw_info.p_igu_info; 1783 1784 /* Initialize base sb / sb cnt for PFs and VFs */ 1785 p_igu_info->igu_base_sb = 0xffff; 1786 p_igu_info->igu_sb_cnt = 0; 1787 p_igu_info->igu_dsb_id = 0xffff; 1788 p_igu_info->igu_base_sb_iov = 0xffff; 1789 1790 if (p_hwfn->cdev->p_iov_info) { 1791 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 1792 1793 min_vf = p_iov->first_vf_in_pf; 1794 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 1795 } 1796 1797 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1798 sb_id++) { 1799 blk = &p_igu_info->igu_map.igu_blocks[sb_id]; 1800 1801 val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); 1802 1803 /* stop scanning when hit first invalid PF entry */ 1804 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 1805 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 1806 break; 1807 1808 if (blk->is_pf) { 1809 if (blk->function_id == p_hwfn->rel_pf_id) { 1810 blk->status |= QED_IGU_STATUS_PF; 1811 1812 if (blk->vector_number == 0) { 1813 if (p_igu_info->igu_dsb_id == 0xffff) 1814 p_igu_info->igu_dsb_id = sb_id; 1815 } else { 1816 if (p_igu_info->igu_base_sb == 1817 0xffff) { 1818 p_igu_info->igu_base_sb = sb_id; 1819 } else if (prev_sb_id != sb_id - 1) { 1820 DP_NOTICE(p_hwfn->cdev, 1821 "consecutive igu vectors for HWFN %x broken", 1822 p_hwfn->rel_pf_id); 1823 break; 1824 } 1825 prev_sb_id = sb_id; 1826 /* we don't count the default */ 1827 (p_igu_info->igu_sb_cnt)++; 1828 } 1829 } 1830 } else { 1831 if ((blk->function_id >= min_vf) && 1832 (blk->function_id < max_vf)) { 1833 /* Available for VFs of this PF */ 1834 if (p_igu_info->igu_base_sb_iov == 0xffff) { 1835 p_igu_info->igu_base_sb_iov = sb_id; 1836 } else if (last_iov_sb_id != sb_id - 1) { 1837 if (!val) { 1838 DP_VERBOSE(p_hwfn->cdev, 1839 NETIF_MSG_INTR, 1840 "First uninitialized IGU CAM entry at index 0x%04x\n", 1841 sb_id); 1842 } else { 1843 DP_NOTICE(p_hwfn->cdev, 1844 "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", 1845 p_hwfn->rel_pf_id, 1846 last_iov_sb_id, 1847 sb_id); } 1848 break; 1849 } 1850 blk->status |= QED_IGU_STATUS_FREE; 1851 p_hwfn->hw_info.p_igu_info->free_blks++; 1852 last_iov_sb_id = sb_id; 1853 } 1854 } 1855 } 1856 1857 /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect 1858 * the number of VF SBs [especially for first VF on engine, as we can't 1859 * differentiate between empty entries and its entries]. 1860 * Since we don't really support more SBs than VFs today, prevent any 1861 * such configuration by sanitizing the number of SBs to equal the 1862 * number of VFs. 1863 */ 1864 if (IS_PF_SRIOV(p_hwfn)) { 1865 u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; 1866 1867 if (total_vfs < p_igu_info->free_blks) { 1868 DP_VERBOSE(p_hwfn, 1869 (NETIF_MSG_INTR | QED_MSG_IOV), 1870 "Limiting number of SBs for IOV - %04x --> %04x\n", 1871 p_igu_info->free_blks, 1872 p_hwfn->cdev->p_iov_info->total_vfs); 1873 p_igu_info->free_blks = total_vfs; 1874 } else if (total_vfs > p_igu_info->free_blks) { 1875 DP_NOTICE(p_hwfn, 1876 "IGU has only %04x SBs for VFs while the device has %04x VFs\n", 1877 p_igu_info->free_blks, total_vfs); 1878 return -EINVAL; 1879 } 1880 } 1881 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; 1882 1883 DP_VERBOSE( 1884 p_hwfn, 1885 NETIF_MSG_INTR, 1886 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", 1887 p_igu_info->igu_base_sb, 1888 p_igu_info->igu_base_sb_iov, 1889 p_igu_info->igu_sb_cnt, 1890 p_igu_info->igu_sb_cnt_iov, 1891 p_igu_info->igu_dsb_id); 1892 1893 if (p_igu_info->igu_base_sb == 0xffff || 1894 p_igu_info->igu_dsb_id == 0xffff || 1895 p_igu_info->igu_sb_cnt == 0) { 1896 DP_NOTICE(p_hwfn, 1897 "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", 1898 p_igu_info->igu_base_sb, 1899 p_igu_info->igu_sb_cnt, 1900 p_igu_info->igu_dsb_id); 1901 return -EINVAL; 1902 } 1903 1904 return 0; 1905 } 1906 1907 /** 1908 * @brief Initialize igu runtime registers 1909 * 1910 * @param p_hwfn 1911 */ 1912 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 1913 { 1914 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 1915 1916 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 1917 } 1918 1919 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 1920 { 1921 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 1922 IGU_CMD_INT_ACK_BASE; 1923 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 1924 IGU_CMD_INT_ACK_BASE; 1925 u32 intr_status_hi = 0, intr_status_lo = 0; 1926 u64 intr_status = 0; 1927 1928 intr_status_lo = REG_RD(p_hwfn, 1929 GTT_BAR0_MAP_REG_IGU_CMD + 1930 lsb_igu_cmd_addr * 8); 1931 intr_status_hi = REG_RD(p_hwfn, 1932 GTT_BAR0_MAP_REG_IGU_CMD + 1933 msb_igu_cmd_addr * 8); 1934 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 1935 1936 return intr_status; 1937 } 1938 1939 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 1940 { 1941 tasklet_init(p_hwfn->sp_dpc, 1942 qed_int_sp_dpc, (unsigned long)p_hwfn); 1943 p_hwfn->b_sp_dpc_enabled = true; 1944 } 1945 1946 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 1947 { 1948 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 1949 if (!p_hwfn->sp_dpc) 1950 return -ENOMEM; 1951 1952 return 0; 1953 } 1954 1955 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 1956 { 1957 kfree(p_hwfn->sp_dpc); 1958 p_hwfn->sp_dpc = NULL; 1959 } 1960 1961 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1962 { 1963 int rc = 0; 1964 1965 rc = qed_int_sp_dpc_alloc(p_hwfn); 1966 if (rc) 1967 return rc; 1968 1969 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 1970 if (rc) 1971 return rc; 1972 1973 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 1974 1975 return rc; 1976 } 1977 1978 void qed_int_free(struct qed_hwfn *p_hwfn) 1979 { 1980 qed_int_sp_sb_free(p_hwfn); 1981 qed_int_sb_attn_free(p_hwfn); 1982 qed_int_sp_dpc_free(p_hwfn); 1983 } 1984 1985 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1986 { 1987 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 1988 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1989 qed_int_sp_dpc_setup(p_hwfn); 1990 } 1991 1992 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 1993 struct qed_sb_cnt_info *p_sb_cnt_info) 1994 { 1995 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 1996 1997 if (!info || !p_sb_cnt_info) 1998 return; 1999 2000 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; 2001 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; 2002 p_sb_cnt_info->sb_free_blk = info->free_blks; 2003 } 2004 2005 u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 2006 { 2007 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2008 2009 /* Determine origin of SB id */ 2010 if ((sb_id >= p_info->igu_base_sb) && 2011 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { 2012 return sb_id - p_info->igu_base_sb; 2013 } else if ((sb_id >= p_info->igu_base_sb_iov) && 2014 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { 2015 /* We want the first VF queue to be adjacent to the 2016 * last PF queue. Since L2 queues can be partial to 2017 * SBs, we'll use the feature instead. 2018 */ 2019 return sb_id - p_info->igu_base_sb_iov + 2020 FEAT_NUM(p_hwfn, QED_PF_L2_QUE); 2021 } else { 2022 DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); 2023 return 0; 2024 } 2025 } 2026 2027 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 2028 { 2029 int i; 2030 2031 for_each_hwfn(cdev, i) 2032 cdev->hwfns[i].b_int_requested = false; 2033 } 2034 2035 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2036 u8 timer_res, u16 sb_id, bool tx) 2037 { 2038 struct cau_sb_entry sb_entry; 2039 int rc; 2040 2041 if (!p_hwfn->hw_init_done) { 2042 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2043 return -EINVAL; 2044 } 2045 2046 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2047 sb_id * sizeof(u64), 2048 (u64)(uintptr_t)&sb_entry, 2, 0); 2049 if (rc) { 2050 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2051 return rc; 2052 } 2053 2054 if (tx) 2055 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2056 else 2057 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2058 2059 rc = qed_dmae_host2grc(p_hwfn, p_ptt, 2060 (u64)(uintptr_t)&sb_entry, 2061 CAU_REG_SB_VAR_MEMORY + 2062 sb_id * sizeof(u64), 2, 0); 2063 if (rc) { 2064 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2065 return rc; 2066 } 2067 2068 return rc; 2069 } 2070