1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2008-2011 Solarflare Communications Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published 7 * by the Free Software Foundation, incorporated herein by reference. 8 */ 9 10 #include <linux/delay.h> 11 #include "net_driver.h" 12 #include "nic.h" 13 #include "io.h" 14 #include "regs.h" 15 #include "mcdi_pcol.h" 16 #include "phy.h" 17 18 /************************************************************************** 19 * 20 * Management-Controller-to-Driver Interface 21 * 22 ************************************************************************** 23 */ 24 25 #define MCDI_RPC_TIMEOUT (10 * HZ) 26 27 #define MCDI_PDU(efx) \ 28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) 29 #define MCDI_DOORBELL(efx) \ 30 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) 31 #define MCDI_STATUS(efx) \ 32 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) 33 34 /* A reboot/assertion causes the MCDI status word to be set after the 35 * command word is set or a REBOOT event is sent. If we notice a reboot 36 * via these mechanisms then wait 10ms for the status word to be set. */ 37 #define MCDI_STATUS_DELAY_US 100 38 #define MCDI_STATUS_DELAY_COUNT 100 39 #define MCDI_STATUS_SLEEP_MS \ 40 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 41 42 #define SEQ_MASK \ 43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 44 45 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 46 { 47 struct siena_nic_data *nic_data; 48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 49 nic_data = efx->nic_data; 50 return &nic_data->mcdi; 51 } 52 53 void efx_mcdi_init(struct efx_nic *efx) 54 { 55 struct efx_mcdi_iface *mcdi; 56 57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 58 return; 59 60 mcdi = efx_mcdi(efx); 61 init_waitqueue_head(&mcdi->wq); 62 spin_lock_init(&mcdi->iface_lock); 63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 64 mcdi->mode = MCDI_MODE_POLL; 65 66 (void) efx_mcdi_poll_reboot(efx); 67 } 68 69 static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, 70 const u8 *inbuf, size_t inlen) 71 { 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 75 unsigned int i; 76 efx_dword_t hdr; 77 u32 xflags, seqno; 78 79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 80 BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN); 81 82 seqno = mcdi->seqno & SEQ_MASK; 83 xflags = 0; 84 if (mcdi->mode == MCDI_MODE_EVENTS) 85 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 86 87 EFX_POPULATE_DWORD_6(hdr, 88 MCDI_HEADER_RESPONSE, 0, 89 MCDI_HEADER_RESYNC, 1, 90 MCDI_HEADER_CODE, cmd, 91 MCDI_HEADER_DATALEN, inlen, 92 MCDI_HEADER_SEQ, seqno, 93 MCDI_HEADER_XFLAGS, xflags); 94 95 efx_writed(efx, &hdr, pdu); 96 97 for (i = 0; i < inlen; i += 4) 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 99 100 /* Ensure the payload is written out before the header */ 101 wmb(); 102 103 /* ring the doorbell with a distinctive value */ 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 105 } 106 107 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 108 { 109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 111 int i; 112 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 114 BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN); 115 116 for (i = 0; i < outlen; i += 4) 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 118 } 119 120 static int efx_mcdi_poll(struct efx_nic *efx) 121 { 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 unsigned long time, finish; 124 unsigned int respseq, respcmd, error; 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 unsigned int rc, spins; 127 efx_dword_t reg; 128 129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 130 rc = -efx_mcdi_poll_reboot(efx); 131 if (rc) 132 goto out; 133 134 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, 135 * because generally mcdi responses are fast. After that, back off 136 * and poll once a jiffy (approximately) 137 */ 138 spins = TICK_USEC; 139 finish = jiffies + MCDI_RPC_TIMEOUT; 140 141 while (1) { 142 if (spins != 0) { 143 --spins; 144 udelay(1); 145 } else { 146 schedule_timeout_uninterruptible(1); 147 } 148 149 time = jiffies; 150 151 rmb(); 152 efx_readd(efx, ®, pdu); 153 154 /* All 1's indicates that shared memory is in reset (and is 155 * not a valid header). Wait for it to come out reset before 156 * completing the command */ 157 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && 158 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) 159 break; 160 161 if (time_after(time, finish)) 162 return -ETIMEDOUT; 163 } 164 165 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); 166 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); 167 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); 168 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); 169 170 if (error && mcdi->resplen == 0) { 171 netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); 172 rc = EIO; 173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { 174 netif_err(efx, hw, efx->net_dev, 175 "MC response mismatch tx seq 0x%x rx seq 0x%x\n", 176 respseq, mcdi->seqno); 177 rc = EIO; 178 } else if (error) { 179 efx_readd(efx, ®, pdu + 4); 180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 181 #define TRANSLATE_ERROR(name) \ 182 case MC_CMD_ERR_ ## name: \ 183 rc = name; \ 184 break 185 TRANSLATE_ERROR(ENOENT); 186 TRANSLATE_ERROR(EINTR); 187 TRANSLATE_ERROR(EACCES); 188 TRANSLATE_ERROR(EBUSY); 189 TRANSLATE_ERROR(EINVAL); 190 TRANSLATE_ERROR(EDEADLK); 191 TRANSLATE_ERROR(ENOSYS); 192 TRANSLATE_ERROR(ETIME); 193 #undef TRANSLATE_ERROR 194 default: 195 rc = EIO; 196 break; 197 } 198 } else 199 rc = 0; 200 201 out: 202 mcdi->resprc = rc; 203 if (rc) 204 mcdi->resplen = 0; 205 206 /* Return rc=0 like wait_event_timeout() */ 207 return 0; 208 } 209 210 /* Test and clear MC-rebooted flag for this port/function; reset 211 * software state as necessary. 212 */ 213 int efx_mcdi_poll_reboot(struct efx_nic *efx) 214 { 215 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); 216 efx_dword_t reg; 217 uint32_t value; 218 219 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 220 return false; 221 222 efx_readd(efx, ®, addr); 223 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 224 225 if (value == 0) 226 return 0; 227 228 /* MAC statistics have been cleared on the NIC; clear our copy 229 * so that efx_update_diff_stat() can continue to work. 230 */ 231 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); 232 233 EFX_ZERO_DWORD(reg); 234 efx_writed(efx, ®, addr); 235 236 if (value == MC_STATUS_DWORD_ASSERT) 237 return -EINTR; 238 else 239 return -EIO; 240 } 241 242 static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) 243 { 244 /* Wait until the interface becomes QUIESCENT and we win the race 245 * to mark it RUNNING. */ 246 wait_event(mcdi->wq, 247 atomic_cmpxchg(&mcdi->state, 248 MCDI_STATE_QUIESCENT, 249 MCDI_STATE_RUNNING) 250 == MCDI_STATE_QUIESCENT); 251 } 252 253 static int efx_mcdi_await_completion(struct efx_nic *efx) 254 { 255 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 256 257 if (wait_event_timeout( 258 mcdi->wq, 259 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, 260 MCDI_RPC_TIMEOUT) == 0) 261 return -ETIMEDOUT; 262 263 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 264 * In which case, poll for completions directly. If efx_mcdi_ev_cpl() 265 * completed the request first, then we'll just end up completing the 266 * request again, which is safe. 267 * 268 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which 269 * wait_event_timeout() implicitly provides. 270 */ 271 if (mcdi->mode == MCDI_MODE_POLL) 272 return efx_mcdi_poll(efx); 273 274 return 0; 275 } 276 277 static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) 278 { 279 /* If the interface is RUNNING, then move to COMPLETED and wake any 280 * waiters. If the interface isn't in RUNNING then we've received a 281 * duplicate completion after we've already transitioned back to 282 * QUIESCENT. [A subsequent invocation would increment seqno, so would 283 * have failed the seqno check]. 284 */ 285 if (atomic_cmpxchg(&mcdi->state, 286 MCDI_STATE_RUNNING, 287 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { 288 wake_up(&mcdi->wq); 289 return true; 290 } 291 292 return false; 293 } 294 295 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 296 { 297 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 298 wake_up(&mcdi->wq); 299 } 300 301 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 302 unsigned int datalen, unsigned int errno) 303 { 304 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 305 bool wake = false; 306 307 spin_lock(&mcdi->iface_lock); 308 309 if ((seqno ^ mcdi->seqno) & SEQ_MASK) { 310 if (mcdi->credits) 311 /* The request has been cancelled */ 312 --mcdi->credits; 313 else 314 netif_err(efx, hw, efx->net_dev, 315 "MC response mismatch tx seq 0x%x rx " 316 "seq 0x%x\n", seqno, mcdi->seqno); 317 } else { 318 mcdi->resprc = errno; 319 mcdi->resplen = datalen; 320 321 wake = true; 322 } 323 324 spin_unlock(&mcdi->iface_lock); 325 326 if (wake) 327 efx_mcdi_complete(mcdi); 328 } 329 330 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 331 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, 332 size_t *outlen_actual) 333 { 334 efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); 335 return efx_mcdi_rpc_finish(efx, cmd, inlen, 336 outbuf, outlen, outlen_actual); 337 } 338 339 void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 340 size_t inlen) 341 { 342 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 343 344 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 345 346 efx_mcdi_acquire(mcdi); 347 348 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 349 spin_lock_bh(&mcdi->iface_lock); 350 ++mcdi->seqno; 351 spin_unlock_bh(&mcdi->iface_lock); 352 353 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 354 } 355 356 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 357 u8 *outbuf, size_t outlen, size_t *outlen_actual) 358 { 359 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 360 int rc; 361 362 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 363 364 if (mcdi->mode == MCDI_MODE_POLL) 365 rc = efx_mcdi_poll(efx); 366 else 367 rc = efx_mcdi_await_completion(efx); 368 369 if (rc != 0) { 370 /* Close the race with efx_mcdi_ev_cpl() executing just too late 371 * and completing a request we've just cancelled, by ensuring 372 * that the seqno check therein fails. 373 */ 374 spin_lock_bh(&mcdi->iface_lock); 375 ++mcdi->seqno; 376 ++mcdi->credits; 377 spin_unlock_bh(&mcdi->iface_lock); 378 379 netif_err(efx, hw, efx->net_dev, 380 "MC command 0x%x inlen %d mode %d timed out\n", 381 cmd, (int)inlen, mcdi->mode); 382 } else { 383 size_t resplen; 384 385 /* At the very least we need a memory barrier here to ensure 386 * we pick up changes from efx_mcdi_ev_cpl(). Protect against 387 * a spurious efx_mcdi_ev_cpl() running concurrently by 388 * acquiring the iface_lock. */ 389 spin_lock_bh(&mcdi->iface_lock); 390 rc = -mcdi->resprc; 391 resplen = mcdi->resplen; 392 spin_unlock_bh(&mcdi->iface_lock); 393 394 if (rc == 0) { 395 efx_mcdi_copyout(efx, outbuf, 396 min(outlen, mcdi->resplen + 3) & ~0x3); 397 if (outlen_actual != NULL) 398 *outlen_actual = resplen; 399 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 400 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 401 else if (rc == -EIO || rc == -EINTR) { 402 netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", 403 -rc); 404 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 405 } else 406 netif_dbg(efx, hw, efx->net_dev, 407 "MC command 0x%x inlen %d failed rc=%d\n", 408 cmd, (int)inlen, -rc); 409 410 if (rc == -EIO || rc == -EINTR) { 411 msleep(MCDI_STATUS_SLEEP_MS); 412 efx_mcdi_poll_reboot(efx); 413 } 414 } 415 416 efx_mcdi_release(mcdi); 417 return rc; 418 } 419 420 void efx_mcdi_mode_poll(struct efx_nic *efx) 421 { 422 struct efx_mcdi_iface *mcdi; 423 424 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 425 return; 426 427 mcdi = efx_mcdi(efx); 428 if (mcdi->mode == MCDI_MODE_POLL) 429 return; 430 431 /* We can switch from event completion to polled completion, because 432 * mcdi requests are always completed in shared memory. We do this by 433 * switching the mode to POLL'd then completing the request. 434 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 435 * 436 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 437 * which efx_mcdi_complete() provides for us. 438 */ 439 mcdi->mode = MCDI_MODE_POLL; 440 441 efx_mcdi_complete(mcdi); 442 } 443 444 void efx_mcdi_mode_event(struct efx_nic *efx) 445 { 446 struct efx_mcdi_iface *mcdi; 447 448 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 449 return; 450 451 mcdi = efx_mcdi(efx); 452 453 if (mcdi->mode == MCDI_MODE_EVENTS) 454 return; 455 456 /* We can't switch from polled to event completion in the middle of a 457 * request, because the completion method is specified in the request. 458 * So acquire the interface to serialise the requestors. We don't need 459 * to acquire the iface_lock to change the mode here, but we do need a 460 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 461 * efx_mcdi_acquire() provides. 462 */ 463 efx_mcdi_acquire(mcdi); 464 mcdi->mode = MCDI_MODE_EVENTS; 465 efx_mcdi_release(mcdi); 466 } 467 468 static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) 469 { 470 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 471 472 /* If there is an outstanding MCDI request, it has been terminated 473 * either by a BADASSERT or REBOOT event. If the mcdi interface is 474 * in polled mode, then do nothing because the MC reboot handler will 475 * set the header correctly. However, if the mcdi interface is waiting 476 * for a CMDDONE event it won't receive it [and since all MCDI events 477 * are sent to the same queue, we can't be racing with 478 * efx_mcdi_ev_cpl()] 479 * 480 * There's a race here with efx_mcdi_rpc(), because we might receive 481 * a REBOOT event *before* the request has been copied out. In polled 482 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 483 * is ignored. In event mode, this condition is just an edge-case of 484 * receiving a REBOOT event after posting the MCDI request. Did the mc 485 * reboot before or after the copyout? The best we can do always is 486 * just return failure. 487 */ 488 spin_lock(&mcdi->iface_lock); 489 if (efx_mcdi_complete(mcdi)) { 490 if (mcdi->mode == MCDI_MODE_EVENTS) { 491 mcdi->resprc = rc; 492 mcdi->resplen = 0; 493 ++mcdi->credits; 494 } 495 } else { 496 int count; 497 498 /* Nobody was waiting for an MCDI request, so trigger a reset */ 499 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 500 501 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 502 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 503 if (efx_mcdi_poll_reboot(efx)) 504 break; 505 udelay(MCDI_STATUS_DELAY_US); 506 } 507 } 508 509 spin_unlock(&mcdi->iface_lock); 510 } 511 512 static unsigned int efx_mcdi_event_link_speed[] = { 513 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 514 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 515 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, 516 }; 517 518 519 static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) 520 { 521 u32 flags, fcntl, speed, lpa; 522 523 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); 524 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); 525 speed = efx_mcdi_event_link_speed[speed]; 526 527 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); 528 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); 529 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); 530 531 /* efx->link_state is only modified by efx_mcdi_phy_get_link(), 532 * which is only run after flushing the event queues. Therefore, it 533 * is safe to modify the link state outside of the mac_lock here. 534 */ 535 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); 536 537 efx_mcdi_phy_check_fcntl(efx, lpa); 538 539 efx_link_status_changed(efx); 540 } 541 542 /* Called from falcon_process_eventq for MCDI events */ 543 void efx_mcdi_process_event(struct efx_channel *channel, 544 efx_qword_t *event) 545 { 546 struct efx_nic *efx = channel->efx; 547 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); 548 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); 549 550 switch (code) { 551 case MCDI_EVENT_CODE_BADSSERT: 552 netif_err(efx, hw, efx->net_dev, 553 "MC watchdog or assertion failure at 0x%x\n", data); 554 efx_mcdi_ev_death(efx, EINTR); 555 break; 556 557 case MCDI_EVENT_CODE_PMNOTICE: 558 netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); 559 break; 560 561 case MCDI_EVENT_CODE_CMDDONE: 562 efx_mcdi_ev_cpl(efx, 563 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), 564 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), 565 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); 566 break; 567 568 case MCDI_EVENT_CODE_LINKCHANGE: 569 efx_mcdi_process_link_change(efx, event); 570 break; 571 case MCDI_EVENT_CODE_SENSOREVT: 572 efx_mcdi_sensor_event(efx, event); 573 break; 574 case MCDI_EVENT_CODE_SCHEDERR: 575 netif_info(efx, hw, efx->net_dev, 576 "MC Scheduler error address=0x%x\n", data); 577 break; 578 case MCDI_EVENT_CODE_REBOOT: 579 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 580 efx_mcdi_ev_death(efx, EIO); 581 break; 582 case MCDI_EVENT_CODE_MAC_STATS_DMA: 583 /* MAC stats are gather lazily. We can ignore this. */ 584 break; 585 case MCDI_EVENT_CODE_FLR: 586 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 587 break; 588 case MCDI_EVENT_CODE_PTP_RX: 589 case MCDI_EVENT_CODE_PTP_FAULT: 590 case MCDI_EVENT_CODE_PTP_PPS: 591 efx_ptp_event(efx, event); 592 break; 593 594 default: 595 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 596 code); 597 } 598 } 599 600 /************************************************************************** 601 * 602 * Specific request functions 603 * 604 ************************************************************************** 605 */ 606 607 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 608 { 609 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; 610 size_t outlength; 611 const __le16 *ver_words; 612 int rc; 613 614 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 615 616 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 617 outbuf, sizeof(outbuf), &outlength); 618 if (rc) 619 goto fail; 620 621 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 622 rc = -EIO; 623 goto fail; 624 } 625 626 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 627 snprintf(buf, len, "%u.%u.%u.%u", 628 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 629 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 630 return; 631 632 fail: 633 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 634 buf[0] = 0; 635 } 636 637 int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 638 bool *was_attached) 639 { 640 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; 641 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; 642 size_t outlen; 643 int rc; 644 645 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 646 driver_operating ? 1 : 0); 647 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 648 649 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 650 outbuf, sizeof(outbuf), &outlen); 651 if (rc) 652 goto fail; 653 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { 654 rc = -EIO; 655 goto fail; 656 } 657 658 if (was_attached != NULL) 659 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 660 return 0; 661 662 fail: 663 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 664 return rc; 665 } 666 667 int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 668 u16 *fw_subtype_list, u32 *capabilities) 669 { 670 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX]; 671 size_t outlen, offset, i; 672 int port_num = efx_port_num(efx); 673 int rc; 674 675 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 676 677 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 678 outbuf, sizeof(outbuf), &outlen); 679 if (rc) 680 goto fail; 681 682 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 683 rc = -EIO; 684 goto fail; 685 } 686 687 offset = (port_num) 688 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 689 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; 690 if (mac_address) 691 memcpy(mac_address, outbuf + offset, ETH_ALEN); 692 if (fw_subtype_list) { 693 /* Byte-swap and truncate or zero-pad as necessary */ 694 offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST; 695 for (i = 0; 696 i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; 697 i++) { 698 fw_subtype_list[i] = 699 (offset + 2 <= outlen) ? 700 le16_to_cpup((__le16 *)(outbuf + offset)) : 0; 701 offset += 2; 702 } 703 } 704 if (capabilities) { 705 if (port_num) 706 *capabilities = MCDI_DWORD(outbuf, 707 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 708 else 709 *capabilities = MCDI_DWORD(outbuf, 710 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 711 } 712 713 return 0; 714 715 fail: 716 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", 717 __func__, rc, (int)outlen); 718 719 return rc; 720 } 721 722 int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 723 { 724 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; 725 u32 dest = 0; 726 int rc; 727 728 if (uart) 729 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; 730 if (evq) 731 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; 732 733 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); 734 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); 735 736 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); 737 738 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), 739 NULL, 0, NULL); 740 if (rc) 741 goto fail; 742 743 return 0; 744 745 fail: 746 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 747 return rc; 748 } 749 750 int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 751 { 752 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; 753 size_t outlen; 754 int rc; 755 756 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); 757 758 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, 759 outbuf, sizeof(outbuf), &outlen); 760 if (rc) 761 goto fail; 762 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { 763 rc = -EIO; 764 goto fail; 765 } 766 767 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 768 return 0; 769 770 fail: 771 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 772 __func__, rc); 773 return rc; 774 } 775 776 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 777 size_t *size_out, size_t *erase_size_out, 778 bool *protected_out) 779 { 780 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; 781 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; 782 size_t outlen; 783 int rc; 784 785 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); 786 787 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), 788 outbuf, sizeof(outbuf), &outlen); 789 if (rc) 790 goto fail; 791 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { 792 rc = -EIO; 793 goto fail; 794 } 795 796 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 797 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 798 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 799 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); 800 return 0; 801 802 fail: 803 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 804 return rc; 805 } 806 807 int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) 808 { 809 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; 810 int rc; 811 812 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); 813 814 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); 815 816 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), 817 NULL, 0, NULL); 818 if (rc) 819 goto fail; 820 821 return 0; 822 823 fail: 824 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 825 return rc; 826 } 827 828 int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, 829 loff_t offset, u8 *buffer, size_t length) 830 { 831 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; 832 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; 833 size_t outlen; 834 int rc; 835 836 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); 837 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); 838 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); 839 840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), 841 outbuf, sizeof(outbuf), &outlen); 842 if (rc) 843 goto fail; 844 845 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); 846 return 0; 847 848 fail: 849 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 850 return rc; 851 } 852 853 int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 854 loff_t offset, const u8 *buffer, size_t length) 855 { 856 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; 857 int rc; 858 859 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 860 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); 861 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); 862 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); 863 864 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 865 866 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, 867 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), 868 NULL, 0, NULL); 869 if (rc) 870 goto fail; 871 872 return 0; 873 874 fail: 875 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 876 return rc; 877 } 878 879 int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 880 loff_t offset, size_t length) 881 { 882 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; 883 int rc; 884 885 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); 886 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); 887 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); 888 889 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); 890 891 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), 892 NULL, 0, NULL); 893 if (rc) 894 goto fail; 895 896 return 0; 897 898 fail: 899 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 900 return rc; 901 } 902 903 int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) 904 { 905 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; 906 int rc; 907 908 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); 909 910 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); 911 912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), 913 NULL, 0, NULL); 914 if (rc) 915 goto fail; 916 917 return 0; 918 919 fail: 920 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 921 return rc; 922 } 923 924 static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 925 { 926 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; 927 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; 928 int rc; 929 930 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 931 932 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), 933 outbuf, sizeof(outbuf), NULL); 934 if (rc) 935 return rc; 936 937 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { 938 case MC_CMD_NVRAM_TEST_PASS: 939 case MC_CMD_NVRAM_TEST_NOTSUPP: 940 return 0; 941 default: 942 return -EIO; 943 } 944 } 945 946 int efx_mcdi_nvram_test_all(struct efx_nic *efx) 947 { 948 u32 nvram_types; 949 unsigned int type; 950 int rc; 951 952 rc = efx_mcdi_nvram_types(efx, &nvram_types); 953 if (rc) 954 goto fail1; 955 956 type = 0; 957 while (nvram_types != 0) { 958 if (nvram_types & 1) { 959 rc = efx_mcdi_nvram_test(efx, type); 960 if (rc) 961 goto fail2; 962 } 963 type++; 964 nvram_types >>= 1; 965 } 966 967 return 0; 968 969 fail2: 970 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", 971 __func__, type); 972 fail1: 973 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 974 return rc; 975 } 976 977 static int efx_mcdi_read_assertion(struct efx_nic *efx) 978 { 979 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; 980 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; 981 unsigned int flags, index, ofst; 982 const char *reason; 983 size_t outlen; 984 int retry; 985 int rc; 986 987 /* Attempt to read any stored assertion state before we reboot 988 * the mcfw out of the assertion handler. Retry twice, once 989 * because a boot-time assertion might cause this command to fail 990 * with EINTR. And once again because GET_ASSERTS can race with 991 * MC_CMD_REBOOT running on the other port. */ 992 retry = 2; 993 do { 994 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); 995 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 996 inbuf, MC_CMD_GET_ASSERTS_IN_LEN, 997 outbuf, sizeof(outbuf), &outlen); 998 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 999 1000 if (rc) 1001 return rc; 1002 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 1003 return -EIO; 1004 1005 /* Print out any recorded assertion state */ 1006 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 1007 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 1008 return 0; 1009 1010 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 1011 ? "system-level assertion" 1012 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 1013 ? "thread-level assertion" 1014 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 1015 ? "watchdog reset" 1016 : "unknown assertion"; 1017 netif_err(efx, hw, efx->net_dev, 1018 "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 1019 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1020 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1021 1022 /* Print out the registers */ 1023 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1024 for (index = 1; index < 32; index++) { 1025 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, 1026 MCDI_DWORD2(outbuf, ofst)); 1027 ofst += sizeof(efx_dword_t); 1028 } 1029 1030 return 0; 1031 } 1032 1033 static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1034 { 1035 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1036 1037 /* If the MC is running debug firmware, it might now be 1038 * waiting for a debugger to attach, but we just want it to 1039 * reboot. We set a flag that makes the command a no-op if it 1040 * has already done so. We don't know what return code to 1041 * expect (0 or -EIO), so ignore it. 1042 */ 1043 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1044 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 1045 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); 1046 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, 1047 NULL, 0, NULL); 1048 } 1049 1050 int efx_mcdi_handle_assertion(struct efx_nic *efx) 1051 { 1052 int rc; 1053 1054 rc = efx_mcdi_read_assertion(efx); 1055 if (rc) 1056 return rc; 1057 1058 efx_mcdi_exit_assertion(efx); 1059 1060 return 0; 1061 } 1062 1063 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1064 { 1065 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1066 int rc; 1067 1068 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1069 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); 1070 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); 1071 1072 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); 1073 1074 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); 1075 1076 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), 1077 NULL, 0, NULL); 1078 if (rc) 1079 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1080 __func__, rc); 1081 } 1082 1083 int efx_mcdi_reset_port(struct efx_nic *efx) 1084 { 1085 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1086 if (rc) 1087 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1088 __func__, rc); 1089 return rc; 1090 } 1091 1092 int efx_mcdi_reset_mc(struct efx_nic *efx) 1093 { 1094 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1095 int rc; 1096 1097 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1098 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); 1099 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), 1100 NULL, 0, NULL); 1101 /* White is black, and up is down */ 1102 if (rc == -EIO) 1103 return 0; 1104 if (rc == 0) 1105 rc = -EIO; 1106 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1107 return rc; 1108 } 1109 1110 static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1111 const u8 *mac, int *id_out) 1112 { 1113 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1114 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1115 size_t outlen; 1116 int rc; 1117 1118 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1119 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1120 MC_CMD_FILTER_MODE_SIMPLE); 1121 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1122 1123 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1124 outbuf, sizeof(outbuf), &outlen); 1125 if (rc) 1126 goto fail; 1127 1128 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1129 rc = -EIO; 1130 goto fail; 1131 } 1132 1133 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); 1134 1135 return 0; 1136 1137 fail: 1138 *id_out = -1; 1139 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1140 return rc; 1141 1142 } 1143 1144 1145 int 1146 efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) 1147 { 1148 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); 1149 } 1150 1151 1152 int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1153 { 1154 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; 1155 size_t outlen; 1156 int rc; 1157 1158 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, 1159 outbuf, sizeof(outbuf), &outlen); 1160 if (rc) 1161 goto fail; 1162 1163 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1164 rc = -EIO; 1165 goto fail; 1166 } 1167 1168 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); 1169 1170 return 0; 1171 1172 fail: 1173 *id_out = -1; 1174 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1175 return rc; 1176 } 1177 1178 1179 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1180 { 1181 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; 1182 int rc; 1183 1184 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1185 1186 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), 1187 NULL, 0, NULL); 1188 if (rc) 1189 goto fail; 1190 1191 return 0; 1192 1193 fail: 1194 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1195 return rc; 1196 } 1197 1198 int efx_mcdi_flush_rxqs(struct efx_nic *efx) 1199 { 1200 struct efx_channel *channel; 1201 struct efx_rx_queue *rx_queue; 1202 __le32 *qid; 1203 int rc, count; 1204 1205 BUILD_BUG_ON(EFX_MAX_CHANNELS > 1206 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 1207 1208 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); 1209 if (qid == NULL) 1210 return -ENOMEM; 1211 1212 count = 0; 1213 efx_for_each_channel(channel, efx) { 1214 efx_for_each_channel_rx_queue(rx_queue, channel) { 1215 if (rx_queue->flush_pending) { 1216 rx_queue->flush_pending = false; 1217 atomic_dec(&efx->rxq_flush_pending); 1218 qid[count++] = cpu_to_le32( 1219 efx_rx_queue_index(rx_queue)); 1220 } 1221 } 1222 } 1223 1224 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, 1225 count * sizeof(*qid), NULL, 0, NULL); 1226 WARN_ON(rc < 0); 1227 1228 kfree(qid); 1229 1230 return rc; 1231 } 1232 1233 int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1234 { 1235 int rc; 1236 1237 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); 1238 if (rc) 1239 goto fail; 1240 1241 return 0; 1242 1243 fail: 1244 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1245 return rc; 1246 } 1247 1248