/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	10 /* seconds */

#define MCDI_PDU(efx)							\
	(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx)						\
	(efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
#define MCDI_STATUS(efx)						\
	(efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 10ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		100
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
	EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
	nic_data = efx->nic_data;
	return &nic_data->mcdi;
}

void efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	(void) efx_mcdi_poll_reboot(efx);
}

static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const u8 *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
	unsigned int i;
	efx_dword_t hdr;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
	BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	EFX_POPULATE_DWORD_6(hdr,
			     MCDI_HEADER_RESPONSE, 0,
			     MCDI_HEADER_RESYNC, 1,
			     MCDI_HEADER_CODE, cmd,
			     MCDI_HEADER_DATALEN, inlen,
			     MCDI_HEADER_SEQ, seqno,
			     MCDI_HEADER_XFLAGS, xflags);

	efx_writed(efx, &hdr, pdu);

	for (i = 0; i < inlen; i += 4)
		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);

	/* Ensure the payload is written out before the header */
	wmb();

	/* Ring the doorbell with a distinctive value */
	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}

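/* Copy the response payload back out of the shared-memory PDU. The caller
 * must already have observed a completed response header.
 */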
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	int i;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
	BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);

	for (i = 0; i < outlen; i += 4)
		*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int time, finish;
	unsigned int respseq, respcmd, error;
	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned int rc, spins;
	efx_dword_t reg;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = -efx_mcdi_poll_reboot(efx);
	if (rc)
		goto out;

	/* Poll for completion. Poll quickly (once per microsecond) for the
	 * 1st jiffy, because generally mcdi responses are fast. After that,
	 * back off and poll once a jiffy (approximately).
	 */
	spins = TICK_USEC;
	finish = get_seconds() + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = get_seconds();

		rmb();
		efx_readd(efx, &reg, pdu);

		/* All 1's indicates that shared memory is in reset (and is
		 * not a valid header). Wait for it to come out of reset
		 * before completing the command.
		 */
		if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
		    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
			break;

		if (time >= finish)
			return -ETIMEDOUT;
	}

	mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
	respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);

	if (error && mcdi->resplen == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		rc = EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		rc = EIO;
	} else if (error) {
		efx_readd(efx, &reg, pdu + 4);
		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name)						\
		case MC_CMD_ERR_ ## name:				\
			rc = name;					\
			break
		TRANSLATE_ERROR(ENOENT);
		TRANSLATE_ERROR(EINTR);
		TRANSLATE_ERROR(EACCES);
		TRANSLATE_ERROR(EBUSY);
		TRANSLATE_ERROR(EINVAL);
		TRANSLATE_ERROR(EDEADLK);
		TRANSLATE_ERROR(ENOSYS);
		TRANSLATE_ERROR(ETIME);
#undef TRANSLATE_ERROR
		default:
			rc = EIO;
			break;
		}
	} else
		rc = 0;

out:
	mcdi->resprc = rc;
	if (rc)
		mcdi->resplen = 0;

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
	efx_dword_t reg;
	uint32_t value;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return 0;

	efx_readd(efx, &reg, addr);
	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);

	if (value == 0)
		return 0;

	EFX_ZERO_DWORD(reg);
	efx_writed(efx, &reg, addr);

	if (value == MC_STATUS_DWORD_ASSERT)
		return -EINTR;
	else
		return -EIO;
}

static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING.
	 */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}

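/* Wait for the outstanding request to complete, or give up after
 * MCDI_RPC_TIMEOUT seconds. Returns 0 on completion, -ETIMEDOUT otherwise.
 */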
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_mode_poll() switched us back to polled
	 * completions, in which case poll for completion directly. If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(),
	 * which wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}

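/* Handle a CMDDONE event: match the sequence number against the pending
 * request and, if it matches, record the result and complete the request.
 */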
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int errno)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
				  seqno, mcdi->seqno);
	} else {
		mcdi->resprc = errno;
		mcdi->resplen = datalen;

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}

/* Issue the given command by writing the data into the shared memory PDU,
 * ring the doorbell and wait for completion. Copyout the result. */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;
	BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too
		 * late and completing a request we've just cancelled, by
		 * ensuring that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t resplen;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		rc = -mcdi->resprc;
		resplen = mcdi->resplen;
		spin_unlock_bh(&mcdi->iface_lock);

		if (rc == 0) {
			efx_mcdi_copyout(efx, outbuf,
					 min(outlen, mcdi->resplen + 3) & ~0x3);
			if (outlen_actual != NULL)
				*outlen_actual = resplen;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to polled, then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

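/* Handle a BADASSERT or REBOOT event: fail any outstanding request, or
 * schedule a reset if nothing was waiting.
 */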
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do in either
	 * case is to return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resplen = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}

static unsigned int efx_mcdi_event_link_speed[] = {
	[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
};

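/* Decode a LINKCHANGE event and update the PHY and link state accordingly. */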
static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
	u32 flags, fcntl, speed, lpa;

	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
	EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
	speed = efx_mcdi_event_link_speed[speed];

	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
	 * which is only run after flushing the event queues. Therefore, it
	 * is safe to modify the link state outside of the mac_lock here.
	 */
	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

	efx_mcdi_phy_check_fcntl(efx, lpa);

	efx_link_status_changed(efx);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
	u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

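/* Fetch the board configuration: per-port MAC address base, firmware
 * subtype list and per-port capability flags. Any output pointer may be
 * NULL if the caller does not need that value.
 */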
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
	size_t outlen;
	int port_num = efx_port_num(efx);
	int offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	offset = (port_num)
		 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
		 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
	if (mac_address)
		memcpy(mac_address, outbuf + offset, ETH_ALEN);
	if (fw_subtype_list)
		memcpy(fw_subtype_list,
		       outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
		       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
		       sizeof(fw_subtype_list[0]));
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
	u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

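/* Read 'length' bytes (at most EFX_MCDI_NVRAM_LEN_MAX per call) from the
 * given offset within an NVRAM partition.
 */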
int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			loff_t offset, u8 *buffer, size_t length)
{
	u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
	u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
			 loff_t offset, const u8 *buffer, size_t length)
{
	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
			 loff_t offset, size_t length)
{
	u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
	u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

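/* Run MC_CMD_NVRAM_TEST on every NVRAM partition type the MC reports,
 * stopping at the first failure.
 */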
int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
	u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
	unsigned int flags, index, ofst;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice: once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR, and once more because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1; index < 32; index++) {
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
			  MCDI_DWORD2(outbuf, ofst));
		ofst += sizeof(efx_dword_t);
	}

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_REBOOT_IN_LEN];

	/* Atomically reboot the mcfw out of the assertion handler */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
		     NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);

	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

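/* Reset just this port/function (MC_CMD_ENTITY_RESET with no flags). */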
int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: a successful reboot kills the
	 * response, so -EIO means the MC rebooted and 0 means it didn't.
	 */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
	u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

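/* Flush every RX queue that has a flush pending, using a single
 * MC_CMD_FLUSH_RX_QUEUES request.
 */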
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	__le32 *qid;
	int rc, count;

	qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
	if (qid == NULL)
		return -ENOMEM;

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				qid[count++] = cpu_to_le32(
					efx_rx_queue_index(rx_queue));
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
			  count * sizeof(*qid), NULL, 0, NULL);
	WARN_ON(rc > 0);

	kfree(qid);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}