1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015 QLogic Corporation 3 * 4 * This software is available under the terms of the GNU General Public License 5 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * this source tree. 7 */ 8 9 #include <linux/types.h> 10 #include <asm/byteorder.h> 11 #include <linux/delay.h> 12 #include <linux/errno.h> 13 #include <linux/kernel.h> 14 #include <linux/slab.h> 15 #include <linux/spinlock.h> 16 #include <linux/string.h> 17 #include "qed.h" 18 #include "qed_dcbx.h" 19 #include "qed_hsi.h" 20 #include "qed_hw.h" 21 #include "qed_mcp.h" 22 #include "qed_reg_addr.h" 23 #include "qed_sriov.h" 24 25 #define CHIP_MCP_RESP_ITER_US 10 26 27 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ 28 #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ 29 30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ 31 qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ 32 _val) 33 34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ 35 qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) 36 37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ 38 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ 39 offsetof(struct public_drv_mb, _field), _val) 40 41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ 42 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ 43 offsetof(struct public_drv_mb, _field)) 44 45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ 46 DRV_ID_PDA_COMP_VER_SHIFT) 47 48 #define MCP_BYTES_PER_MBIT_SHIFT 17 49 50 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) 51 { 52 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) 53 return false; 54 return true; 55 } 56 57 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 58 { 59 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 60 PUBLIC_PORT); 61 u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); 62 63 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, 64 MFW_PORT(p_hwfn)); 65 DP_VERBOSE(p_hwfn, QED_MSG_SP, 66 "port_addr = 0x%x, port_id 0x%02x\n", 67 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); 68 } 69 70 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 71 { 72 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); 73 u32 tmp, i; 74 75 if (!p_hwfn->mcp_info->public_base) 76 return; 77 78 for (i = 0; i < length; i++) { 79 tmp = qed_rd(p_hwfn, p_ptt, 80 p_hwfn->mcp_info->mfw_mb_addr + 81 (i << 2) + sizeof(u32)); 82 83 /* The MB data is actually BE; Need to force it to cpu */ 84 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = 85 be32_to_cpu((__force __be32)tmp); 86 } 87 } 88 89 int qed_mcp_free(struct qed_hwfn *p_hwfn) 90 { 91 if (p_hwfn->mcp_info) { 92 kfree(p_hwfn->mcp_info->mfw_mb_cur); 93 kfree(p_hwfn->mcp_info->mfw_mb_shadow); 94 } 95 kfree(p_hwfn->mcp_info); 96 97 return 0; 98 } 99 100 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 101 { 102 struct qed_mcp_info *p_info = p_hwfn->mcp_info; 103 u32 drv_mb_offsize, mfw_mb_offsize; 104 u32 mcp_pf_id = MCP_PF_ID(p_hwfn); 105 106 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); 107 if (!p_info->public_base) 108 return 0; 109 110 p_info->public_base |= GRCBASE_MCP; 111 112 /* Calculate the driver and MFW mailbox address */ 113 drv_mb_offsize = qed_rd(p_hwfn, p_ptt, 114 SECTION_OFFSIZE_ADDR(p_info->public_base, 115 PUBLIC_DRV_MB)); 116 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); 117 DP_VERBOSE(p_hwfn, QED_MSG_SP, 118 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", 119 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); 120 121 /* Set the MFW MB address */ 122 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, 123 SECTION_OFFSIZE_ADDR(p_info->public_base, 124 PUBLIC_MFW_MB)); 125 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); 126 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); 127 128 /* Get the current driver mailbox sequence before sending 129 * the first command 130 */ 131 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & 132 DRV_MSG_SEQ_NUMBER_MASK; 133 134 /* Get current FW pulse sequence */ 135 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & 136 DRV_PULSE_SEQ_MASK; 137 138 p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); 139 140 return 0; 141 } 142 143 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 144 { 145 struct qed_mcp_info *p_info; 146 u32 size; 147 148 /* Allocate mcp_info structure */ 149 p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); 150 if (!p_hwfn->mcp_info) 151 goto err; 152 p_info = p_hwfn->mcp_info; 153 154 if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { 155 DP_NOTICE(p_hwfn, "MCP is not initialized\n"); 156 /* Do not free mcp_info here, since public_base indicate that 157 * the MCP is not initialized 158 */ 159 return 0; 160 } 161 162 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 163 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 164 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 165 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 166 goto err; 167 168 /* Initialize the MFW spinlock */ 169 spin_lock_init(&p_info->lock); 170 171 return 0; 172 173 err: 174 qed_mcp_free(p_hwfn); 175 return -ENOMEM; 176 } 177 178 /* Locks the MFW mailbox of a PF to ensure a single access. 179 * The lock is achieved in most cases by holding a spinlock, causing other 180 * threads to wait till a previous access is done. 181 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single 182 * access is achieved by setting a blocking flag, which will fail other 183 * competing contexts to send their mailboxes. 184 */ 185 static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd) 186 { 187 spin_lock_bh(&p_hwfn->mcp_info->lock); 188 189 /* The spinlock shouldn't be acquired when the mailbox command is 190 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel 191 * pending [UN]LOAD_REQ command of another PF together with a spinlock 192 * (i.e. interrupts are disabled) - can lead to a deadlock. 193 * It is assumed that for a single PF, no other mailbox commands can be 194 * sent from another context while sending LOAD_REQ, and that any 195 * parallel commands to UNLOAD_REQ can be cancelled. 196 */ 197 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE) 198 p_hwfn->mcp_info->block_mb_sending = false; 199 200 if (p_hwfn->mcp_info->block_mb_sending) { 201 DP_NOTICE(p_hwfn, 202 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n", 203 cmd); 204 spin_unlock_bh(&p_hwfn->mcp_info->lock); 205 return -EBUSY; 206 } 207 208 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) { 209 p_hwfn->mcp_info->block_mb_sending = true; 210 spin_unlock_bh(&p_hwfn->mcp_info->lock); 211 } 212 213 return 0; 214 } 215 216 static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) 217 { 218 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) 219 spin_unlock_bh(&p_hwfn->mcp_info->lock); 220 } 221 222 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 223 { 224 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; 225 u8 delay = CHIP_MCP_RESP_ITER_US; 226 u32 org_mcp_reset_seq, cnt = 0; 227 int rc = 0; 228 229 /* Ensure that only a single thread is accessing the mailbox at a 230 * certain time. 231 */ 232 rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET); 233 if (rc != 0) 234 return rc; 235 236 /* Set drv command along with the updated sequence */ 237 org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); 238 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, 239 (DRV_MSG_CODE_MCP_RESET | seq)); 240 241 do { 242 /* Wait for MFW response */ 243 udelay(delay); 244 /* Give the FW up to 500 second (50*1000*10usec) */ 245 } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt, 246 MISCS_REG_GENERIC_POR_0)) && 247 (cnt++ < QED_MCP_RESET_RETRIES)); 248 249 if (org_mcp_reset_seq != 250 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { 251 DP_VERBOSE(p_hwfn, QED_MSG_SP, 252 "MCP was reset after %d usec\n", cnt * delay); 253 } else { 254 DP_ERR(p_hwfn, "Failed to reset MCP\n"); 255 rc = -EAGAIN; 256 } 257 258 qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET); 259 260 return rc; 261 } 262 263 static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, 264 struct qed_ptt *p_ptt, 265 u32 cmd, 266 u32 param, 267 u32 *o_mcp_resp, 268 u32 *o_mcp_param) 269 { 270 u8 delay = CHIP_MCP_RESP_ITER_US; 271 u32 seq, cnt = 1, actual_mb_seq; 272 int rc = 0; 273 274 /* Get actual driver mailbox sequence */ 275 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & 276 DRV_MSG_SEQ_NUMBER_MASK; 277 278 /* Use MCP history register to check if MCP reset occurred between 279 * init time and now. 280 */ 281 if (p_hwfn->mcp_info->mcp_hist != 282 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { 283 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); 284 qed_load_mcp_offsets(p_hwfn, p_ptt); 285 qed_mcp_cmd_port_init(p_hwfn, p_ptt); 286 } 287 seq = ++p_hwfn->mcp_info->drv_mb_seq; 288 289 /* Set drv param */ 290 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); 291 292 /* Set drv command along with the updated sequence */ 293 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); 294 295 DP_VERBOSE(p_hwfn, QED_MSG_SP, 296 "wrote command (%x) to MFW MB param 0x%08x\n", 297 (cmd | seq), param); 298 299 do { 300 /* Wait for MFW response */ 301 udelay(delay); 302 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); 303 304 /* Give the FW up to 5 second (500*10ms) */ 305 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && 306 (cnt++ < QED_DRV_MB_MAX_RETRIES)); 307 308 DP_VERBOSE(p_hwfn, QED_MSG_SP, 309 "[after %d ms] read (%x) seq is (%x) from FW MB\n", 310 cnt * delay, *o_mcp_resp, seq); 311 312 /* Is this a reply to our command? */ 313 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { 314 *o_mcp_resp &= FW_MSG_CODE_MASK; 315 /* Get the MCP param */ 316 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); 317 } else { 318 /* FW BUG! */ 319 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", 320 cmd, param); 321 *o_mcp_resp = 0; 322 rc = -EAGAIN; 323 } 324 return rc; 325 } 326 327 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, 328 struct qed_ptt *p_ptt, 329 struct qed_mcp_mb_params *p_mb_params) 330 { 331 u32 union_data_addr; 332 int rc; 333 334 /* MCP not initialized */ 335 if (!qed_mcp_is_init(p_hwfn)) { 336 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 337 return -EBUSY; 338 } 339 340 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + 341 offsetof(struct public_drv_mb, union_data); 342 343 /* Ensure that only a single thread is accessing the mailbox at a 344 * certain time. 345 */ 346 rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd); 347 if (rc) 348 return rc; 349 350 if (p_mb_params->p_data_src != NULL) 351 qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, 352 p_mb_params->p_data_src, 353 sizeof(*p_mb_params->p_data_src)); 354 355 rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd, 356 p_mb_params->param, &p_mb_params->mcp_resp, 357 &p_mb_params->mcp_param); 358 359 if (p_mb_params->p_data_dst != NULL) 360 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, 361 union_data_addr, 362 sizeof(*p_mb_params->p_data_dst)); 363 364 qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd); 365 366 return rc; 367 } 368 369 int qed_mcp_cmd(struct qed_hwfn *p_hwfn, 370 struct qed_ptt *p_ptt, 371 u32 cmd, 372 u32 param, 373 u32 *o_mcp_resp, 374 u32 *o_mcp_param) 375 { 376 struct qed_mcp_mb_params mb_params; 377 int rc; 378 379 memset(&mb_params, 0, sizeof(mb_params)); 380 mb_params.cmd = cmd; 381 mb_params.param = param; 382 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 383 if (rc) 384 return rc; 385 386 *o_mcp_resp = mb_params.mcp_resp; 387 *o_mcp_param = mb_params.mcp_param; 388 389 return 0; 390 } 391 392 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, 393 struct qed_ptt *p_ptt, 394 u32 cmd, 395 u32 param, 396 u32 *o_mcp_resp, 397 u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) 398 { 399 struct qed_mcp_mb_params mb_params; 400 union drv_union_data union_data; 401 int rc; 402 403 memset(&mb_params, 0, sizeof(mb_params)); 404 mb_params.cmd = cmd; 405 mb_params.param = param; 406 mb_params.p_data_dst = &union_data; 407 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 408 if (rc) 409 return rc; 410 411 *o_mcp_resp = mb_params.mcp_resp; 412 *o_mcp_param = mb_params.mcp_param; 413 414 *o_txn_size = *o_mcp_param; 415 memcpy(o_buf, &union_data.raw_data, *o_txn_size); 416 417 return 0; 418 } 419 420 int qed_mcp_load_req(struct qed_hwfn *p_hwfn, 421 struct qed_ptt *p_ptt, u32 *p_load_code) 422 { 423 struct qed_dev *cdev = p_hwfn->cdev; 424 struct qed_mcp_mb_params mb_params; 425 union drv_union_data union_data; 426 int rc; 427 428 memset(&mb_params, 0, sizeof(mb_params)); 429 /* Load Request */ 430 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; 431 mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | 432 cdev->drv_type; 433 memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); 434 mb_params.p_data_src = &union_data; 435 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 436 437 /* if mcp fails to respond we must abort */ 438 if (rc) { 439 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 440 return rc; 441 } 442 443 *p_load_code = mb_params.mcp_resp; 444 445 /* If MFW refused (e.g. other port is in diagnostic mode) we 446 * must abort. This can happen in the following cases: 447 * - Other port is in diagnostic mode 448 * - Previously loaded function on the engine is not compliant with 449 * the requester. 450 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION. 451 * - 452 */ 453 if (!(*p_load_code) || 454 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) || 455 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) || 456 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) { 457 DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); 458 return -EBUSY; 459 } 460 461 return 0; 462 } 463 464 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, 465 struct qed_ptt *p_ptt) 466 { 467 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 468 PUBLIC_PATH); 469 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); 470 u32 path_addr = SECTION_ADDR(mfw_path_offsize, 471 QED_PATH_ID(p_hwfn)); 472 u32 disabled_vfs[VF_MAX_STATIC / 32]; 473 int i; 474 475 DP_VERBOSE(p_hwfn, 476 QED_MSG_SP, 477 "Reading Disabled VF information from [offset %08x], path_addr %08x\n", 478 mfw_path_offsize, path_addr); 479 480 for (i = 0; i < (VF_MAX_STATIC / 32); i++) { 481 disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, 482 path_addr + 483 offsetof(struct public_path, 484 mcp_vf_disabled) + 485 sizeof(u32) * i); 486 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), 487 "FLR-ed VFs [%08x,...,%08x] - %08x\n", 488 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); 489 } 490 491 if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) 492 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); 493 } 494 495 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, 496 struct qed_ptt *p_ptt, u32 *vfs_to_ack) 497 { 498 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 499 PUBLIC_FUNC); 500 u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); 501 u32 func_addr = SECTION_ADDR(mfw_func_offsize, 502 MCP_PF_ID(p_hwfn)); 503 struct qed_mcp_mb_params mb_params; 504 union drv_union_data union_data; 505 int rc; 506 int i; 507 508 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 509 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), 510 "Acking VFs [%08x,...,%08x] - %08x\n", 511 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); 512 513 memset(&mb_params, 0, sizeof(mb_params)); 514 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; 515 memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8); 516 mb_params.p_data_src = &union_data; 517 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 518 if (rc) { 519 DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); 520 return -EBUSY; 521 } 522 523 /* Clear the ACK bits */ 524 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 525 qed_wr(p_hwfn, p_ptt, 526 func_addr + 527 offsetof(struct public_func, drv_ack_vf_disabled) + 528 i * sizeof(u32), 0); 529 530 return rc; 531 } 532 533 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, 534 struct qed_ptt *p_ptt) 535 { 536 u32 transceiver_state; 537 538 transceiver_state = qed_rd(p_hwfn, p_ptt, 539 p_hwfn->mcp_info->port_addr + 540 offsetof(struct public_port, 541 transceiver_data)); 542 543 DP_VERBOSE(p_hwfn, 544 (NETIF_MSG_HW | QED_MSG_SP), 545 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", 546 transceiver_state, 547 (u32)(p_hwfn->mcp_info->port_addr + 548 offsetof(struct public_port, transceiver_data))); 549 550 transceiver_state = GET_FIELD(transceiver_state, 551 ETH_TRANSCEIVER_STATE); 552 553 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) 554 DP_NOTICE(p_hwfn, "Transceiver is present.\n"); 555 else 556 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); 557 } 558 559 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, 560 struct qed_ptt *p_ptt, bool b_reset) 561 { 562 struct qed_mcp_link_state *p_link; 563 u8 max_bw, min_bw; 564 u32 status = 0; 565 566 p_link = &p_hwfn->mcp_info->link_output; 567 memset(p_link, 0, sizeof(*p_link)); 568 if (!b_reset) { 569 status = qed_rd(p_hwfn, p_ptt, 570 p_hwfn->mcp_info->port_addr + 571 offsetof(struct public_port, link_status)); 572 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), 573 "Received link update [0x%08x] from mfw [Addr 0x%x]\n", 574 status, 575 (u32)(p_hwfn->mcp_info->port_addr + 576 offsetof(struct public_port, link_status))); 577 } else { 578 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 579 "Resetting link indications\n"); 580 return; 581 } 582 583 if (p_hwfn->b_drv_link_init) 584 p_link->link_up = !!(status & LINK_STATUS_LINK_UP); 585 else 586 p_link->link_up = false; 587 588 p_link->full_duplex = true; 589 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { 590 case LINK_STATUS_SPEED_AND_DUPLEX_100G: 591 p_link->speed = 100000; 592 break; 593 case LINK_STATUS_SPEED_AND_DUPLEX_50G: 594 p_link->speed = 50000; 595 break; 596 case LINK_STATUS_SPEED_AND_DUPLEX_40G: 597 p_link->speed = 40000; 598 break; 599 case LINK_STATUS_SPEED_AND_DUPLEX_25G: 600 p_link->speed = 25000; 601 break; 602 case LINK_STATUS_SPEED_AND_DUPLEX_20G: 603 p_link->speed = 20000; 604 break; 605 case LINK_STATUS_SPEED_AND_DUPLEX_10G: 606 p_link->speed = 10000; 607 break; 608 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: 609 p_link->full_duplex = false; 610 /* Fall-through */ 611 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: 612 p_link->speed = 1000; 613 break; 614 default: 615 p_link->speed = 0; 616 } 617 618 if (p_link->link_up && p_link->speed) 619 p_link->line_speed = p_link->speed; 620 else 621 p_link->line_speed = 0; 622 623 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; 624 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; 625 626 /* Max bandwidth configuration */ 627 __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); 628 629 /* Min bandwidth configuration */ 630 __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); 631 qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate); 632 633 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); 634 p_link->an_complete = !!(status & 635 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); 636 p_link->parallel_detection = !!(status & 637 LINK_STATUS_PARALLEL_DETECTION_USED); 638 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); 639 640 p_link->partner_adv_speed |= 641 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? 642 QED_LINK_PARTNER_SPEED_1G_FD : 0; 643 p_link->partner_adv_speed |= 644 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? 645 QED_LINK_PARTNER_SPEED_1G_HD : 0; 646 p_link->partner_adv_speed |= 647 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? 648 QED_LINK_PARTNER_SPEED_10G : 0; 649 p_link->partner_adv_speed |= 650 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? 651 QED_LINK_PARTNER_SPEED_20G : 0; 652 p_link->partner_adv_speed |= 653 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? 654 QED_LINK_PARTNER_SPEED_25G : 0; 655 p_link->partner_adv_speed |= 656 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? 657 QED_LINK_PARTNER_SPEED_40G : 0; 658 p_link->partner_adv_speed |= 659 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? 660 QED_LINK_PARTNER_SPEED_50G : 0; 661 p_link->partner_adv_speed |= 662 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 663 QED_LINK_PARTNER_SPEED_100G : 0; 664 665 p_link->partner_tx_flow_ctrl_en = 666 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); 667 p_link->partner_rx_flow_ctrl_en = 668 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); 669 670 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { 671 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: 672 p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; 673 break; 674 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: 675 p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; 676 break; 677 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: 678 p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; 679 break; 680 default: 681 p_link->partner_adv_pause = 0; 682 } 683 684 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); 685 686 qed_link_update(p_hwfn); 687 } 688 689 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) 690 { 691 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; 692 struct qed_mcp_mb_params mb_params; 693 union drv_union_data union_data; 694 struct eth_phy_cfg *phy_cfg; 695 int rc = 0; 696 u32 cmd; 697 698 /* Set the shmem configuration according to params */ 699 phy_cfg = &union_data.drv_phy_cfg; 700 memset(phy_cfg, 0, sizeof(*phy_cfg)); 701 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; 702 if (!params->speed.autoneg) 703 phy_cfg->speed = params->speed.forced_speed; 704 phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; 705 phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; 706 phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; 707 phy_cfg->adv_speed = params->speed.advertised_speeds; 708 phy_cfg->loopback_mode = params->loopback_mode; 709 710 p_hwfn->b_drv_link_init = b_up; 711 712 if (b_up) { 713 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 714 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", 715 phy_cfg->speed, 716 phy_cfg->pause, 717 phy_cfg->adv_speed, 718 phy_cfg->loopback_mode, 719 phy_cfg->feature_config_flags); 720 } else { 721 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 722 "Resetting link\n"); 723 } 724 725 memset(&mb_params, 0, sizeof(mb_params)); 726 mb_params.cmd = cmd; 727 mb_params.p_data_src = &union_data; 728 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 729 730 /* if mcp fails to respond we must abort */ 731 if (rc) { 732 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 733 return rc; 734 } 735 736 /* Reset the link status if needed */ 737 if (!b_up) 738 qed_mcp_handle_link_change(p_hwfn, p_ptt, true); 739 740 return 0; 741 } 742 743 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, 744 struct qed_ptt *p_ptt, 745 enum MFW_DRV_MSG_TYPE type) 746 { 747 enum qed_mcp_protocol_type stats_type; 748 union qed_mcp_protocol_stats stats; 749 struct qed_mcp_mb_params mb_params; 750 union drv_union_data union_data; 751 u32 hsi_param; 752 753 switch (type) { 754 case MFW_DRV_MSG_GET_LAN_STATS: 755 stats_type = QED_MCP_LAN_STATS; 756 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; 757 break; 758 case MFW_DRV_MSG_GET_FCOE_STATS: 759 stats_type = QED_MCP_FCOE_STATS; 760 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; 761 break; 762 case MFW_DRV_MSG_GET_ISCSI_STATS: 763 stats_type = QED_MCP_ISCSI_STATS; 764 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; 765 break; 766 case MFW_DRV_MSG_GET_RDMA_STATS: 767 stats_type = QED_MCP_RDMA_STATS; 768 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; 769 break; 770 default: 771 DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); 772 return; 773 } 774 775 qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); 776 777 memset(&mb_params, 0, sizeof(mb_params)); 778 mb_params.cmd = DRV_MSG_CODE_GET_STATS; 779 mb_params.param = hsi_param; 780 memcpy(&union_data, &stats, sizeof(stats)); 781 mb_params.p_data_src = &union_data; 782 qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 783 } 784 785 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, 786 struct public_func *p_shmem_info) 787 { 788 struct qed_mcp_function_info *p_info; 789 790 p_info = &p_hwfn->mcp_info->func_info; 791 792 p_info->bandwidth_min = (p_shmem_info->config & 793 FUNC_MF_CFG_MIN_BW_MASK) >> 794 FUNC_MF_CFG_MIN_BW_SHIFT; 795 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { 796 DP_INFO(p_hwfn, 797 "bandwidth minimum out of bounds [%02x]. Set to 1\n", 798 p_info->bandwidth_min); 799 p_info->bandwidth_min = 1; 800 } 801 802 p_info->bandwidth_max = (p_shmem_info->config & 803 FUNC_MF_CFG_MAX_BW_MASK) >> 804 FUNC_MF_CFG_MAX_BW_SHIFT; 805 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { 806 DP_INFO(p_hwfn, 807 "bandwidth maximum out of bounds [%02x]. Set to 100\n", 808 p_info->bandwidth_max); 809 p_info->bandwidth_max = 100; 810 } 811 } 812 813 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, 814 struct qed_ptt *p_ptt, 815 struct public_func *p_data, int pfid) 816 { 817 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 818 PUBLIC_FUNC); 819 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); 820 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); 821 u32 i, size; 822 823 memset(p_data, 0, sizeof(*p_data)); 824 825 size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); 826 for (i = 0; i < size / sizeof(u32); i++) 827 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, 828 func_addr + (i << 2)); 829 return size; 830 } 831 832 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 833 { 834 struct qed_mcp_function_info *p_info; 835 struct public_func shmem_info; 836 u32 resp = 0, param = 0; 837 838 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 839 840 qed_read_pf_bandwidth(p_hwfn, &shmem_info); 841 842 p_info = &p_hwfn->mcp_info->func_info; 843 844 qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); 845 qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); 846 847 /* Acknowledge the MFW */ 848 qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, 849 ¶m); 850 } 851 852 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, 853 struct qed_ptt *p_ptt) 854 { 855 struct qed_mcp_info *info = p_hwfn->mcp_info; 856 int rc = 0; 857 bool found = false; 858 u16 i; 859 860 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); 861 862 /* Read Messages from MFW */ 863 qed_mcp_read_mb(p_hwfn, p_ptt); 864 865 /* Compare current messages to old ones */ 866 for (i = 0; i < info->mfw_mb_length; i++) { 867 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 868 continue; 869 870 found = true; 871 872 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 873 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 874 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 875 876 switch (i) { 877 case MFW_DRV_MSG_LINK_CHANGE: 878 qed_mcp_handle_link_change(p_hwfn, p_ptt, false); 879 break; 880 case MFW_DRV_MSG_VF_DISABLED: 881 qed_mcp_handle_vf_flr(p_hwfn, p_ptt); 882 break; 883 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 884 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 885 QED_DCBX_REMOTE_LLDP_MIB); 886 break; 887 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 888 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 889 QED_DCBX_REMOTE_MIB); 890 break; 891 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 892 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 893 QED_DCBX_OPERATIONAL_MIB); 894 break; 895 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 896 qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); 897 break; 898 case MFW_DRV_MSG_GET_LAN_STATS: 899 case MFW_DRV_MSG_GET_FCOE_STATS: 900 case MFW_DRV_MSG_GET_ISCSI_STATS: 901 case MFW_DRV_MSG_GET_RDMA_STATS: 902 qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 903 break; 904 case MFW_DRV_MSG_BW_UPDATE: 905 qed_mcp_update_bw(p_hwfn, p_ptt); 906 break; 907 default: 908 DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); 909 rc = -EINVAL; 910 } 911 } 912 913 /* ACK everything */ 914 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 915 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); 916 917 /* MFW expect answer in BE, so we force write in that format */ 918 qed_wr(p_hwfn, p_ptt, 919 info->mfw_mb_addr + sizeof(u32) + 920 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 921 sizeof(u32) + i * sizeof(u32), 922 (__force u32)val); 923 } 924 925 if (!found) { 926 DP_NOTICE(p_hwfn, 927 "Received an MFW message indication but no new message!\n"); 928 rc = -EINVAL; 929 } 930 931 /* Copy the new mfw messages into the shadow */ 932 memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 933 934 return rc; 935 } 936 937 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, 938 struct qed_ptt *p_ptt, 939 u32 *p_mfw_ver, u32 *p_running_bundle_id) 940 { 941 u32 global_offsize; 942 943 if (IS_VF(p_hwfn->cdev)) { 944 if (p_hwfn->vf_iov_info) { 945 struct pfvf_acquire_resp_tlv *p_resp; 946 947 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 948 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 949 return 0; 950 } else { 951 DP_VERBOSE(p_hwfn, 952 QED_MSG_IOV, 953 "VF requested MFW version prior to ACQUIRE\n"); 954 return -EINVAL; 955 } 956 } 957 958 global_offsize = qed_rd(p_hwfn, p_ptt, 959 SECTION_OFFSIZE_ADDR(p_hwfn-> 960 mcp_info->public_base, 961 PUBLIC_GLOBAL)); 962 *p_mfw_ver = 963 qed_rd(p_hwfn, p_ptt, 964 SECTION_ADDR(global_offsize, 965 0) + offsetof(struct public_global, mfw_ver)); 966 967 if (p_running_bundle_id != NULL) { 968 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, 969 SECTION_ADDR(global_offsize, 0) + 970 offsetof(struct public_global, 971 running_bundle_id)); 972 } 973 974 return 0; 975 } 976 977 int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) 978 { 979 struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; 980 struct qed_ptt *p_ptt; 981 982 if (IS_VF(cdev)) 983 return -EINVAL; 984 985 if (!qed_mcp_is_init(p_hwfn)) { 986 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 987 return -EBUSY; 988 } 989 990 *p_media_type = MEDIA_UNSPECIFIED; 991 992 p_ptt = qed_ptt_acquire(p_hwfn); 993 if (!p_ptt) 994 return -EBUSY; 995 996 *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 997 offsetof(struct public_port, media_type)); 998 999 qed_ptt_release(p_hwfn, p_ptt); 1000 1001 return 0; 1002 } 1003 1004 static int 1005 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, 1006 struct public_func *p_info, 1007 enum qed_pci_personality *p_proto) 1008 { 1009 int rc = 0; 1010 1011 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 1012 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 1013 if (test_bit(QED_DEV_CAP_ROCE, 1014 &p_hwfn->hw_info.device_capabilities)) 1015 *p_proto = QED_PCI_ETH_ROCE; 1016 else 1017 *p_proto = QED_PCI_ETH; 1018 break; 1019 case FUNC_MF_CFG_PROTOCOL_ISCSI: 1020 *p_proto = QED_PCI_ISCSI; 1021 break; 1022 case FUNC_MF_CFG_PROTOCOL_ROCE: 1023 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); 1024 rc = -EINVAL; 1025 break; 1026 default: 1027 rc = -EINVAL; 1028 } 1029 1030 return rc; 1031 } 1032 1033 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, 1034 struct qed_ptt *p_ptt) 1035 { 1036 struct qed_mcp_function_info *info; 1037 struct public_func shmem_info; 1038 1039 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 1040 info = &p_hwfn->mcp_info->func_info; 1041 1042 info->pause_on_host = (shmem_info.config & 1043 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; 1044 1045 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { 1046 DP_ERR(p_hwfn, "Unknown personality %08x\n", 1047 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 1048 return -EINVAL; 1049 } 1050 1051 qed_read_pf_bandwidth(p_hwfn, &shmem_info); 1052 1053 if (shmem_info.mac_upper || shmem_info.mac_lower) { 1054 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 1055 info->mac[1] = (u8)(shmem_info.mac_upper); 1056 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 1057 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 1058 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 1059 info->mac[5] = (u8)(shmem_info.mac_lower); 1060 } else { 1061 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); 1062 } 1063 1064 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | 1065 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); 1066 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | 1067 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); 1068 1069 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 1070 1071 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), 1072 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n", 1073 info->pause_on_host, info->protocol, 1074 info->bandwidth_min, info->bandwidth_max, 1075 info->mac[0], info->mac[1], info->mac[2], 1076 info->mac[3], info->mac[4], info->mac[5], 1077 info->wwn_port, info->wwn_node, info->ovlan); 1078 1079 return 0; 1080 } 1081 1082 struct qed_mcp_link_params 1083 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) 1084 { 1085 if (!p_hwfn || !p_hwfn->mcp_info) 1086 return NULL; 1087 return &p_hwfn->mcp_info->link_input; 1088 } 1089 1090 struct qed_mcp_link_state 1091 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) 1092 { 1093 if (!p_hwfn || !p_hwfn->mcp_info) 1094 return NULL; 1095 return &p_hwfn->mcp_info->link_output; 1096 } 1097 1098 struct qed_mcp_link_capabilities 1099 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) 1100 { 1101 if (!p_hwfn || !p_hwfn->mcp_info) 1102 return NULL; 1103 return &p_hwfn->mcp_info->link_capabilities; 1104 } 1105 1106 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1107 { 1108 u32 resp = 0, param = 0; 1109 int rc; 1110 1111 rc = qed_mcp_cmd(p_hwfn, p_ptt, 1112 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); 1113 1114 /* Wait for the drain to complete before returning */ 1115 msleep(1020); 1116 1117 return rc; 1118 } 1119 1120 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, 1121 struct qed_ptt *p_ptt, u32 *p_flash_size) 1122 { 1123 u32 flash_size; 1124 1125 if (IS_VF(p_hwfn->cdev)) 1126 return -EINVAL; 1127 1128 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 1129 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 1130 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 1131 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); 1132 1133 *p_flash_size = flash_size; 1134 1135 return 0; 1136 } 1137 1138 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, 1139 struct qed_ptt *p_ptt, u8 vf_id, u8 num) 1140 { 1141 u32 resp = 0, param = 0, rc_param = 0; 1142 int rc; 1143 1144 /* Only Leader can configure MSIX, and need to take CMT into account */ 1145 if (!IS_LEAD_HWFN(p_hwfn)) 1146 return 0; 1147 num *= p_hwfn->cdev->num_hwfns; 1148 1149 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & 1150 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 1151 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & 1152 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 1153 1154 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 1155 &resp, &rc_param); 1156 1157 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 1158 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); 1159 rc = -EINVAL; 1160 } else { 1161 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1162 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 1163 num, vf_id); 1164 } 1165 1166 return rc; 1167 } 1168 1169 int 1170 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, 1171 struct qed_ptt *p_ptt, 1172 struct qed_mcp_drv_version *p_ver) 1173 { 1174 struct drv_version_stc *p_drv_version; 1175 struct qed_mcp_mb_params mb_params; 1176 union drv_union_data union_data; 1177 __be32 val; 1178 u32 i; 1179 int rc; 1180 1181 p_drv_version = &union_data.drv_version; 1182 p_drv_version->version = p_ver->version; 1183 1184 for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { 1185 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); 1186 *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; 1187 } 1188 1189 memset(&mb_params, 0, sizeof(mb_params)); 1190 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 1191 mb_params.p_data_src = &union_data; 1192 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1193 if (rc) 1194 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 1195 1196 return rc; 1197 } 1198 1199 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1200 { 1201 u32 resp = 0, param = 0; 1202 int rc; 1203 1204 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 1205 ¶m); 1206 if (rc) 1207 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 1208 1209 return rc; 1210 } 1211 1212 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1213 { 1214 u32 value, cpu_mode; 1215 1216 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 1217 1218 value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 1219 value &= ~MCP_REG_CPU_MODE_SOFT_HALT; 1220 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); 1221 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 1222 1223 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; 1224 } 1225 1226 int qed_mcp_set_led(struct qed_hwfn *p_hwfn, 1227 struct qed_ptt *p_ptt, enum qed_led_mode mode) 1228 { 1229 u32 resp = 0, param = 0, drv_mb_param; 1230 int rc; 1231 1232 switch (mode) { 1233 case QED_LED_MODE_ON: 1234 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; 1235 break; 1236 case QED_LED_MODE_OFF: 1237 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; 1238 break; 1239 case QED_LED_MODE_RESTORE: 1240 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; 1241 break; 1242 default: 1243 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); 1244 return -EINVAL; 1245 } 1246 1247 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, 1248 drv_mb_param, &resp, ¶m); 1249 1250 return rc; 1251 } 1252 1253 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, 1254 struct qed_ptt *p_ptt, u32 mask_parities) 1255 { 1256 u32 resp = 0, param = 0; 1257 int rc; 1258 1259 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, 1260 mask_parities, &resp, ¶m); 1261 1262 if (rc) { 1263 DP_ERR(p_hwfn, 1264 "MCP response failure for mask parities, aborting\n"); 1265 } else if (resp != FW_MSG_CODE_OK) { 1266 DP_ERR(p_hwfn, 1267 "MCP did not acknowledge mask parity request. Old MFW?\n"); 1268 rc = -EINVAL; 1269 } 1270 1271 return rc; 1272 } 1273 1274 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1275 { 1276 u32 drv_mb_param = 0, rsp, param; 1277 int rc = 0; 1278 1279 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << 1280 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); 1281 1282 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 1283 drv_mb_param, &rsp, ¶m); 1284 1285 if (rc) 1286 return rc; 1287 1288 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 1289 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 1290 rc = -EAGAIN; 1291 1292 return rc; 1293 } 1294 1295 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1296 { 1297 u32 drv_mb_param, rsp, param; 1298 int rc = 0; 1299 1300 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << 1301 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); 1302 1303 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 1304 drv_mb_param, &rsp, ¶m); 1305 1306 if (rc) 1307 return rc; 1308 1309 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 1310 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 1311 rc = -EAGAIN; 1312 1313 return rc; 1314 } 1315