1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015 QLogic Corporation 3 * 4 * This software is available under the terms of the GNU General Public License 5 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * this source tree. 7 */ 8 9 #include <linux/types.h> 10 #include <asm/byteorder.h> 11 #include <linux/delay.h> 12 #include <linux/errno.h> 13 #include <linux/kernel.h> 14 #include <linux/slab.h> 15 #include <linux/spinlock.h> 16 #include <linux/string.h> 17 #include "qed.h" 18 #include "qed_hsi.h" 19 #include "qed_hw.h" 20 #include "qed_mcp.h" 21 #include "qed_reg_addr.h" 22 #define CHIP_MCP_RESP_ITER_US 10 23 24 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ 25 #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ 26 27 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ 28 qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ 29 _val) 30 31 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ 32 qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) 33 34 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ 35 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ 36 offsetof(struct public_drv_mb, _field), _val) 37 38 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ 39 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ 40 offsetof(struct public_drv_mb, _field)) 41 42 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ 43 DRV_ID_PDA_COMP_VER_SHIFT) 44 45 #define MCP_BYTES_PER_MBIT_SHIFT 17 46 47 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) 48 { 49 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) 50 return false; 51 return true; 52 } 53 54 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, 55 struct qed_ptt *p_ptt) 56 { 57 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 58 PUBLIC_PORT); 59 u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); 60 61 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, 62 MFW_PORT(p_hwfn)); 63 DP_VERBOSE(p_hwfn, QED_MSG_SP, 64 "port_addr = 0x%x, port_id 0x%02x\n", 65 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); 66 } 67 68 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, 69 struct qed_ptt *p_ptt) 70 { 71 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); 72 u32 tmp, i; 73 74 if (!p_hwfn->mcp_info->public_base) 75 return; 76 77 for (i = 0; i < length; i++) { 78 tmp = qed_rd(p_hwfn, p_ptt, 79 p_hwfn->mcp_info->mfw_mb_addr + 80 (i << 2) + sizeof(u32)); 81 82 /* The MB data is actually BE; Need to force it to cpu */ 83 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = 84 be32_to_cpu((__force __be32)tmp); 85 } 86 } 87 88 int qed_mcp_free(struct qed_hwfn *p_hwfn) 89 { 90 if (p_hwfn->mcp_info) { 91 kfree(p_hwfn->mcp_info->mfw_mb_cur); 92 kfree(p_hwfn->mcp_info->mfw_mb_shadow); 93 } 94 kfree(p_hwfn->mcp_info); 95 96 return 0; 97 } 98 99 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, 100 struct qed_ptt *p_ptt) 101 { 102 struct qed_mcp_info *p_info = p_hwfn->mcp_info; 103 u32 drv_mb_offsize, mfw_mb_offsize; 104 u32 mcp_pf_id = MCP_PF_ID(p_hwfn); 105 106 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); 107 if (!p_info->public_base) 108 return 0; 109 110 p_info->public_base |= GRCBASE_MCP; 111 112 /* Calculate the driver and MFW mailbox address */ 113 drv_mb_offsize = qed_rd(p_hwfn, p_ptt, 114 SECTION_OFFSIZE_ADDR(p_info->public_base, 115 PUBLIC_DRV_MB)); 116 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); 117 DP_VERBOSE(p_hwfn, QED_MSG_SP, 118 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", 119 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); 120 121 /* Set the MFW MB address */ 122 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, 123 SECTION_OFFSIZE_ADDR(p_info->public_base, 124 PUBLIC_MFW_MB)); 125 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); 126 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); 127 128 /* Get the current driver mailbox sequence before sending 129 * the first command 130 */ 131 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & 132 DRV_MSG_SEQ_NUMBER_MASK; 133 134 /* Get current FW pulse sequence */ 135 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & 136 DRV_PULSE_SEQ_MASK; 137 138 p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); 139 140 return 0; 141 } 142 143 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, 144 struct qed_ptt *p_ptt) 145 { 146 struct qed_mcp_info *p_info; 147 u32 size; 148 149 /* Allocate mcp_info structure */ 150 p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); 151 if (!p_hwfn->mcp_info) 152 goto err; 153 p_info = p_hwfn->mcp_info; 154 155 if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { 156 DP_NOTICE(p_hwfn, "MCP is not initialized\n"); 157 /* Do not free mcp_info here, since public_base indicate that 158 * the MCP is not initialized 159 */ 160 return 0; 161 } 162 163 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 164 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 165 p_info->mfw_mb_shadow = 166 kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( 167 p_info->mfw_mb_length), GFP_KERNEL); 168 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 169 goto err; 170 171 /* Initialize the MFW spinlock */ 172 spin_lock_init(&p_info->lock); 173 174 return 0; 175 176 err: 177 DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n"); 178 qed_mcp_free(p_hwfn); 179 return -ENOMEM; 180 } 181 182 /* Locks the MFW mailbox of a PF to ensure a single access. 183 * The lock is achieved in most cases by holding a spinlock, causing other 184 * threads to wait till a previous access is done. 185 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single 186 * access is achieved by setting a blocking flag, which will fail other 187 * competing contexts to send their mailboxes. 188 */ 189 static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, 190 u32 cmd) 191 { 192 spin_lock_bh(&p_hwfn->mcp_info->lock); 193 194 /* The spinlock shouldn't be acquired when the mailbox command is 195 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel 196 * pending [UN]LOAD_REQ command of another PF together with a spinlock 197 * (i.e. interrupts are disabled) - can lead to a deadlock. 198 * It is assumed that for a single PF, no other mailbox commands can be 199 * sent from another context while sending LOAD_REQ, and that any 200 * parallel commands to UNLOAD_REQ can be cancelled. 201 */ 202 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE) 203 p_hwfn->mcp_info->block_mb_sending = false; 204 205 if (p_hwfn->mcp_info->block_mb_sending) { 206 DP_NOTICE(p_hwfn, 207 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n", 208 cmd); 209 spin_unlock_bh(&p_hwfn->mcp_info->lock); 210 return -EBUSY; 211 } 212 213 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) { 214 p_hwfn->mcp_info->block_mb_sending = true; 215 spin_unlock_bh(&p_hwfn->mcp_info->lock); 216 } 217 218 return 0; 219 } 220 221 static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, 222 u32 cmd) 223 { 224 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) 225 spin_unlock_bh(&p_hwfn->mcp_info->lock); 226 } 227 228 int qed_mcp_reset(struct qed_hwfn *p_hwfn, 229 struct qed_ptt *p_ptt) 230 { 231 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; 232 u8 delay = CHIP_MCP_RESP_ITER_US; 233 u32 org_mcp_reset_seq, cnt = 0; 234 int rc = 0; 235 236 /* Ensure that only a single thread is accessing the mailbox at a 237 * certain time. 238 */ 239 rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET); 240 if (rc != 0) 241 return rc; 242 243 /* Set drv command along with the updated sequence */ 244 org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); 245 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, 246 (DRV_MSG_CODE_MCP_RESET | seq)); 247 248 do { 249 /* Wait for MFW response */ 250 udelay(delay); 251 /* Give the FW up to 500 second (50*1000*10usec) */ 252 } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt, 253 MISCS_REG_GENERIC_POR_0)) && 254 (cnt++ < QED_MCP_RESET_RETRIES)); 255 256 if (org_mcp_reset_seq != 257 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { 258 DP_VERBOSE(p_hwfn, QED_MSG_SP, 259 "MCP was reset after %d usec\n", cnt * delay); 260 } else { 261 DP_ERR(p_hwfn, "Failed to reset MCP\n"); 262 rc = -EAGAIN; 263 } 264 265 qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET); 266 267 return rc; 268 } 269 270 static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, 271 struct qed_ptt *p_ptt, 272 u32 cmd, 273 u32 param, 274 u32 *o_mcp_resp, 275 u32 *o_mcp_param) 276 { 277 u8 delay = CHIP_MCP_RESP_ITER_US; 278 u32 seq, cnt = 1, actual_mb_seq; 279 int rc = 0; 280 281 /* Get actual driver mailbox sequence */ 282 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & 283 DRV_MSG_SEQ_NUMBER_MASK; 284 285 /* Use MCP history register to check if MCP reset occurred between 286 * init time and now. 287 */ 288 if (p_hwfn->mcp_info->mcp_hist != 289 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { 290 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); 291 qed_load_mcp_offsets(p_hwfn, p_ptt); 292 qed_mcp_cmd_port_init(p_hwfn, p_ptt); 293 } 294 seq = ++p_hwfn->mcp_info->drv_mb_seq; 295 296 /* Set drv param */ 297 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); 298 299 /* Set drv command along with the updated sequence */ 300 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); 301 302 DP_VERBOSE(p_hwfn, QED_MSG_SP, 303 "wrote command (%x) to MFW MB param 0x%08x\n", 304 (cmd | seq), param); 305 306 do { 307 /* Wait for MFW response */ 308 udelay(delay); 309 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); 310 311 /* Give the FW up to 5 second (500*10ms) */ 312 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && 313 (cnt++ < QED_DRV_MB_MAX_RETRIES)); 314 315 DP_VERBOSE(p_hwfn, QED_MSG_SP, 316 "[after %d ms] read (%x) seq is (%x) from FW MB\n", 317 cnt * delay, *o_mcp_resp, seq); 318 319 /* Is this a reply to our command? */ 320 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { 321 *o_mcp_resp &= FW_MSG_CODE_MASK; 322 /* Get the MCP param */ 323 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); 324 } else { 325 /* FW BUG! */ 326 DP_ERR(p_hwfn, "MFW failed to respond!\n"); 327 *o_mcp_resp = 0; 328 rc = -EAGAIN; 329 } 330 return rc; 331 } 332 333 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, 334 struct qed_ptt *p_ptt, 335 struct qed_mcp_mb_params *p_mb_params) 336 { 337 u32 union_data_addr; 338 int rc; 339 340 /* MCP not initialized */ 341 if (!qed_mcp_is_init(p_hwfn)) { 342 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); 343 return -EBUSY; 344 } 345 346 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + 347 offsetof(struct public_drv_mb, union_data); 348 349 /* Ensure that only a single thread is accessing the mailbox at a 350 * certain time. 351 */ 352 rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd); 353 if (rc) 354 return rc; 355 356 if (p_mb_params->p_data_src != NULL) 357 qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, 358 p_mb_params->p_data_src, 359 sizeof(*p_mb_params->p_data_src)); 360 361 rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd, 362 p_mb_params->param, &p_mb_params->mcp_resp, 363 &p_mb_params->mcp_param); 364 365 if (p_mb_params->p_data_dst != NULL) 366 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, 367 union_data_addr, 368 sizeof(*p_mb_params->p_data_dst)); 369 370 qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd); 371 372 return rc; 373 } 374 375 int qed_mcp_cmd(struct qed_hwfn *p_hwfn, 376 struct qed_ptt *p_ptt, 377 u32 cmd, 378 u32 param, 379 u32 *o_mcp_resp, 380 u32 *o_mcp_param) 381 { 382 struct qed_mcp_mb_params mb_params; 383 int rc; 384 385 memset(&mb_params, 0, sizeof(mb_params)); 386 mb_params.cmd = cmd; 387 mb_params.param = param; 388 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 389 if (rc) 390 return rc; 391 392 *o_mcp_resp = mb_params.mcp_resp; 393 *o_mcp_param = mb_params.mcp_param; 394 395 return 0; 396 } 397 398 int qed_mcp_load_req(struct qed_hwfn *p_hwfn, 399 struct qed_ptt *p_ptt, 400 u32 *p_load_code) 401 { 402 struct qed_dev *cdev = p_hwfn->cdev; 403 struct qed_mcp_mb_params mb_params; 404 union drv_union_data union_data; 405 int rc; 406 407 memset(&mb_params, 0, sizeof(mb_params)); 408 /* Load Request */ 409 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; 410 mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | 411 cdev->drv_type; 412 memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); 413 mb_params.p_data_src = &union_data; 414 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 415 416 /* if mcp fails to respond we must abort */ 417 if (rc) { 418 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 419 return rc; 420 } 421 422 *p_load_code = mb_params.mcp_resp; 423 424 /* If MFW refused (e.g. other port is in diagnostic mode) we 425 * must abort. This can happen in the following cases: 426 * - Other port is in diagnostic mode 427 * - Previously loaded function on the engine is not compliant with 428 * the requester. 429 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION. 430 * - 431 */ 432 if (!(*p_load_code) || 433 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) || 434 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) || 435 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) { 436 DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); 437 return -EBUSY; 438 } 439 440 return 0; 441 } 442 443 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, 444 struct qed_ptt *p_ptt) 445 { 446 u32 transceiver_state; 447 448 transceiver_state = qed_rd(p_hwfn, p_ptt, 449 p_hwfn->mcp_info->port_addr + 450 offsetof(struct public_port, 451 transceiver_data)); 452 453 DP_VERBOSE(p_hwfn, 454 (NETIF_MSG_HW | QED_MSG_SP), 455 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", 456 transceiver_state, 457 (u32)(p_hwfn->mcp_info->port_addr + 458 offsetof(struct public_port, 459 transceiver_data))); 460 461 transceiver_state = GET_FIELD(transceiver_state, 462 PMM_TRANSCEIVER_STATE); 463 464 if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) 465 DP_NOTICE(p_hwfn, "Transceiver is present.\n"); 466 else 467 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); 468 } 469 470 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, 471 struct qed_ptt *p_ptt, 472 bool b_reset) 473 { 474 struct qed_mcp_link_state *p_link; 475 u32 status = 0; 476 477 p_link = &p_hwfn->mcp_info->link_output; 478 memset(p_link, 0, sizeof(*p_link)); 479 if (!b_reset) { 480 status = qed_rd(p_hwfn, p_ptt, 481 p_hwfn->mcp_info->port_addr + 482 offsetof(struct public_port, link_status)); 483 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), 484 "Received link update [0x%08x] from mfw [Addr 0x%x]\n", 485 status, 486 (u32)(p_hwfn->mcp_info->port_addr + 487 offsetof(struct public_port, 488 link_status))); 489 } else { 490 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 491 "Resetting link indications\n"); 492 return; 493 } 494 495 if (p_hwfn->b_drv_link_init) 496 p_link->link_up = !!(status & LINK_STATUS_LINK_UP); 497 else 498 p_link->link_up = false; 499 500 p_link->full_duplex = true; 501 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { 502 case LINK_STATUS_SPEED_AND_DUPLEX_100G: 503 p_link->speed = 100000; 504 break; 505 case LINK_STATUS_SPEED_AND_DUPLEX_50G: 506 p_link->speed = 50000; 507 break; 508 case LINK_STATUS_SPEED_AND_DUPLEX_40G: 509 p_link->speed = 40000; 510 break; 511 case LINK_STATUS_SPEED_AND_DUPLEX_25G: 512 p_link->speed = 25000; 513 break; 514 case LINK_STATUS_SPEED_AND_DUPLEX_20G: 515 p_link->speed = 20000; 516 break; 517 case LINK_STATUS_SPEED_AND_DUPLEX_10G: 518 p_link->speed = 10000; 519 break; 520 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: 521 p_link->full_duplex = false; 522 /* Fall-through */ 523 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: 524 p_link->speed = 1000; 525 break; 526 default: 527 p_link->speed = 0; 528 } 529 530 /* Correct speed according to bandwidth allocation */ 531 if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { 532 p_link->speed = p_link->speed * 533 p_hwfn->mcp_info->func_info.bandwidth_max / 534 100; 535 qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 536 p_link->speed); 537 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 538 "Configured MAX bandwidth to be %08x Mb/sec\n", 539 p_link->speed); 540 } 541 542 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); 543 p_link->an_complete = !!(status & 544 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); 545 p_link->parallel_detection = !!(status & 546 LINK_STATUS_PARALLEL_DETECTION_USED); 547 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); 548 549 p_link->partner_adv_speed |= 550 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? 551 QED_LINK_PARTNER_SPEED_1G_FD : 0; 552 p_link->partner_adv_speed |= 553 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? 554 QED_LINK_PARTNER_SPEED_1G_HD : 0; 555 p_link->partner_adv_speed |= 556 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? 557 QED_LINK_PARTNER_SPEED_10G : 0; 558 p_link->partner_adv_speed |= 559 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? 560 QED_LINK_PARTNER_SPEED_20G : 0; 561 p_link->partner_adv_speed |= 562 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? 563 QED_LINK_PARTNER_SPEED_40G : 0; 564 p_link->partner_adv_speed |= 565 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? 566 QED_LINK_PARTNER_SPEED_50G : 0; 567 p_link->partner_adv_speed |= 568 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 569 QED_LINK_PARTNER_SPEED_100G : 0; 570 571 p_link->partner_tx_flow_ctrl_en = 572 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); 573 p_link->partner_rx_flow_ctrl_en = 574 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); 575 576 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { 577 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: 578 p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; 579 break; 580 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: 581 p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; 582 break; 583 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: 584 p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; 585 break; 586 default: 587 p_link->partner_adv_pause = 0; 588 } 589 590 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); 591 592 qed_link_update(p_hwfn); 593 } 594 595 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, 596 struct qed_ptt *p_ptt, 597 bool b_up) 598 { 599 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; 600 struct qed_mcp_mb_params mb_params; 601 union drv_union_data union_data; 602 struct pmm_phy_cfg *phy_cfg; 603 int rc = 0; 604 u32 cmd; 605 606 /* Set the shmem configuration according to params */ 607 phy_cfg = &union_data.drv_phy_cfg; 608 memset(phy_cfg, 0, sizeof(*phy_cfg)); 609 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; 610 if (!params->speed.autoneg) 611 phy_cfg->speed = params->speed.forced_speed; 612 phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; 613 phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; 614 phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; 615 phy_cfg->adv_speed = params->speed.advertised_speeds; 616 phy_cfg->loopback_mode = params->loopback_mode; 617 618 p_hwfn->b_drv_link_init = b_up; 619 620 if (b_up) { 621 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 622 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", 623 phy_cfg->speed, 624 phy_cfg->pause, 625 phy_cfg->adv_speed, 626 phy_cfg->loopback_mode, 627 phy_cfg->feature_config_flags); 628 } else { 629 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 630 "Resetting link\n"); 631 } 632 633 memset(&mb_params, 0, sizeof(mb_params)); 634 mb_params.cmd = cmd; 635 mb_params.p_data_src = &union_data; 636 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 637 638 /* if mcp fails to respond we must abort */ 639 if (rc) { 640 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 641 return rc; 642 } 643 644 /* Reset the link status if needed */ 645 if (!b_up) 646 qed_mcp_handle_link_change(p_hwfn, p_ptt, true); 647 648 return 0; 649 } 650 651 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, 652 struct qed_ptt *p_ptt) 653 { 654 struct qed_mcp_info *info = p_hwfn->mcp_info; 655 int rc = 0; 656 bool found = false; 657 u16 i; 658 659 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); 660 661 /* Read Messages from MFW */ 662 qed_mcp_read_mb(p_hwfn, p_ptt); 663 664 /* Compare current messages to old ones */ 665 for (i = 0; i < info->mfw_mb_length; i++) { 666 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 667 continue; 668 669 found = true; 670 671 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 672 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 673 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 674 675 switch (i) { 676 case MFW_DRV_MSG_LINK_CHANGE: 677 qed_mcp_handle_link_change(p_hwfn, p_ptt, false); 678 break; 679 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 680 qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); 681 break; 682 default: 683 DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); 684 rc = -EINVAL; 685 } 686 } 687 688 /* ACK everything */ 689 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 690 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); 691 692 /* MFW expect answer in BE, so we force write in that format */ 693 qed_wr(p_hwfn, p_ptt, 694 info->mfw_mb_addr + sizeof(u32) + 695 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 696 sizeof(u32) + i * sizeof(u32), 697 (__force u32)val); 698 } 699 700 if (!found) { 701 DP_NOTICE(p_hwfn, 702 "Received an MFW message indication but no new message!\n"); 703 rc = -EINVAL; 704 } 705 706 /* Copy the new mfw messages into the shadow */ 707 memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 708 709 return rc; 710 } 711 712 int qed_mcp_get_mfw_ver(struct qed_dev *cdev, 713 u32 *p_mfw_ver) 714 { 715 struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; 716 struct qed_ptt *p_ptt; 717 u32 global_offsize; 718 719 p_ptt = qed_ptt_acquire(p_hwfn); 720 if (!p_ptt) 721 return -EBUSY; 722 723 global_offsize = qed_rd(p_hwfn, p_ptt, 724 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> 725 public_base, 726 PUBLIC_GLOBAL)); 727 *p_mfw_ver = qed_rd(p_hwfn, p_ptt, 728 SECTION_ADDR(global_offsize, 0) + 729 offsetof(struct public_global, mfw_ver)); 730 731 qed_ptt_release(p_hwfn, p_ptt); 732 733 return 0; 734 } 735 736 int qed_mcp_get_media_type(struct qed_dev *cdev, 737 u32 *p_media_type) 738 { 739 struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; 740 struct qed_ptt *p_ptt; 741 742 if (!qed_mcp_is_init(p_hwfn)) { 743 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); 744 return -EBUSY; 745 } 746 747 *p_media_type = MEDIA_UNSPECIFIED; 748 749 p_ptt = qed_ptt_acquire(p_hwfn); 750 if (!p_ptt) 751 return -EBUSY; 752 753 *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 754 offsetof(struct public_port, media_type)); 755 756 qed_ptt_release(p_hwfn, p_ptt); 757 758 return 0; 759 } 760 761 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, 762 struct qed_ptt *p_ptt, 763 struct public_func *p_data, 764 int pfid) 765 { 766 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 767 PUBLIC_FUNC); 768 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); 769 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); 770 u32 i, size; 771 772 memset(p_data, 0, sizeof(*p_data)); 773 774 size = min_t(u32, sizeof(*p_data), 775 QED_SECTION_SIZE(mfw_path_offsize)); 776 for (i = 0; i < size / sizeof(u32); i++) 777 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, 778 func_addr + (i << 2)); 779 780 return size; 781 } 782 783 static int 784 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, 785 struct public_func *p_info, 786 enum qed_pci_personality *p_proto) 787 { 788 int rc = 0; 789 790 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 791 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 792 *p_proto = QED_PCI_ETH; 793 break; 794 default: 795 rc = -EINVAL; 796 } 797 798 return rc; 799 } 800 801 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, 802 struct qed_ptt *p_ptt) 803 { 804 struct qed_mcp_function_info *info; 805 struct public_func shmem_info; 806 807 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 808 MCP_PF_ID(p_hwfn)); 809 info = &p_hwfn->mcp_info->func_info; 810 811 info->pause_on_host = (shmem_info.config & 812 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; 813 814 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, 815 &info->protocol)) { 816 DP_ERR(p_hwfn, "Unknown personality %08x\n", 817 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 818 return -EINVAL; 819 } 820 821 822 info->bandwidth_min = (shmem_info.config & 823 FUNC_MF_CFG_MIN_BW_MASK) >> 824 FUNC_MF_CFG_MIN_BW_SHIFT; 825 if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { 826 DP_INFO(p_hwfn, 827 "bandwidth minimum out of bounds [%02x]. Set to 1\n", 828 info->bandwidth_min); 829 info->bandwidth_min = 1; 830 } 831 832 info->bandwidth_max = (shmem_info.config & 833 FUNC_MF_CFG_MAX_BW_MASK) >> 834 FUNC_MF_CFG_MAX_BW_SHIFT; 835 if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { 836 DP_INFO(p_hwfn, 837 "bandwidth maximum out of bounds [%02x]. Set to 100\n", 838 info->bandwidth_max); 839 info->bandwidth_max = 100; 840 } 841 842 if (shmem_info.mac_upper || shmem_info.mac_lower) { 843 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 844 info->mac[1] = (u8)(shmem_info.mac_upper); 845 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 846 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 847 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 848 info->mac[5] = (u8)(shmem_info.mac_lower); 849 } else { 850 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); 851 } 852 853 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | 854 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); 855 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | 856 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); 857 858 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 859 860 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), 861 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n", 862 info->pause_on_host, info->protocol, 863 info->bandwidth_min, info->bandwidth_max, 864 info->mac[0], info->mac[1], info->mac[2], 865 info->mac[3], info->mac[4], info->mac[5], 866 info->wwn_port, info->wwn_node, info->ovlan); 867 868 return 0; 869 } 870 871 struct qed_mcp_link_params 872 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) 873 { 874 if (!p_hwfn || !p_hwfn->mcp_info) 875 return NULL; 876 return &p_hwfn->mcp_info->link_input; 877 } 878 879 struct qed_mcp_link_state 880 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) 881 { 882 if (!p_hwfn || !p_hwfn->mcp_info) 883 return NULL; 884 return &p_hwfn->mcp_info->link_output; 885 } 886 887 struct qed_mcp_link_capabilities 888 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) 889 { 890 if (!p_hwfn || !p_hwfn->mcp_info) 891 return NULL; 892 return &p_hwfn->mcp_info->link_capabilities; 893 } 894 895 int qed_mcp_drain(struct qed_hwfn *p_hwfn, 896 struct qed_ptt *p_ptt) 897 { 898 u32 resp = 0, param = 0; 899 int rc; 900 901 rc = qed_mcp_cmd(p_hwfn, p_ptt, 902 DRV_MSG_CODE_NIG_DRAIN, 1000, 903 &resp, ¶m); 904 905 /* Wait for the drain to complete before returning */ 906 msleep(1020); 907 908 return rc; 909 } 910 911 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, 912 struct qed_ptt *p_ptt, 913 u32 *p_flash_size) 914 { 915 u32 flash_size; 916 917 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 918 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 919 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 920 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); 921 922 *p_flash_size = flash_size; 923 924 return 0; 925 } 926 927 int 928 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, 929 struct qed_ptt *p_ptt, 930 struct qed_mcp_drv_version *p_ver) 931 { 932 struct drv_version_stc *p_drv_version; 933 struct qed_mcp_mb_params mb_params; 934 union drv_union_data union_data; 935 __be32 val; 936 u32 i; 937 int rc; 938 939 p_drv_version = &union_data.drv_version; 940 p_drv_version->version = p_ver->version; 941 for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { 942 val = cpu_to_be32(p_ver->name[i]); 943 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; 944 } 945 946 memset(&mb_params, 0, sizeof(mb_params)); 947 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 948 mb_params.p_data_src = &union_data; 949 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 950 if (rc) 951 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 952 953 return rc; 954 } 955 956 int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 957 enum qed_led_mode mode) 958 { 959 u32 resp = 0, param = 0, drv_mb_param; 960 int rc; 961 962 switch (mode) { 963 case QED_LED_MODE_ON: 964 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; 965 break; 966 case QED_LED_MODE_OFF: 967 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; 968 break; 969 case QED_LED_MODE_RESTORE: 970 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; 971 break; 972 default: 973 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); 974 return -EINVAL; 975 } 976 977 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, 978 drv_mb_param, &resp, ¶m); 979 980 return rc; 981 } 982