1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2008-2013 Solarflare Communications Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published 7 * by the Free Software Foundation, incorporated herein by reference. 8 */ 9 10 #include <linux/delay.h> 11 #include <asm/cmpxchg.h> 12 #include "net_driver.h" 13 #include "nic.h" 14 #include "io.h" 15 #include "farch_regs.h" 16 #include "mcdi_pcol.h" 17 #include "phy.h" 18 19 /************************************************************************** 20 * 21 * Management-Controller-to-Driver Interface 22 * 23 ************************************************************************** 24 */ 25 26 #define MCDI_RPC_TIMEOUT (10 * HZ) 27 28 /* A reboot/assertion causes the MCDI status word to be set after the 29 * command word is set or a REBOOT event is sent. If we notice a reboot 30 * via these mechanisms then wait 250ms for the status word to be set. 31 */ 32 #define MCDI_STATUS_DELAY_US 100 33 #define MCDI_STATUS_DELAY_COUNT 2500 34 #define MCDI_STATUS_SLEEP_MS \ 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 36 37 #define SEQ_MASK \ 38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 39 40 struct efx_mcdi_async_param { 41 struct list_head list; 42 unsigned int cmd; 43 size_t inlen; 44 size_t outlen; 45 efx_mcdi_async_completer *complete; 46 unsigned long cookie; 47 /* followed by request/response buffer */ 48 }; 49 50 static void efx_mcdi_timeout_async(unsigned long context); 51 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 52 bool *was_attached_out); 53 54 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 55 { 56 EFX_BUG_ON_PARANOID(!efx->mcdi); 57 return &efx->mcdi->iface; 58 } 59 60 int efx_mcdi_init(struct efx_nic *efx) 61 { 62 struct efx_mcdi_iface *mcdi; 63 bool already_attached; 64 int rc; 65 66 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); 67 if (!efx->mcdi) 68 return -ENOMEM; 69 70 mcdi = efx_mcdi(efx); 71 mcdi->efx = efx; 72 init_waitqueue_head(&mcdi->wq); 73 spin_lock_init(&mcdi->iface_lock); 74 mcdi->state = MCDI_STATE_QUIESCENT; 75 mcdi->mode = MCDI_MODE_POLL; 76 spin_lock_init(&mcdi->async_lock); 77 INIT_LIST_HEAD(&mcdi->async_list); 78 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, 79 (unsigned long)mcdi); 80 81 (void) efx_mcdi_poll_reboot(efx); 82 mcdi->new_epoch = true; 83 84 /* Recover from a failed assertion before probing */ 85 rc = efx_mcdi_handle_assertion(efx); 86 if (rc) 87 return rc; 88 89 /* Let the MC (and BMC, if this is a LOM) know that the driver 90 * is loaded. We should do this before we reset the NIC. 91 */ 92 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 93 if (rc) { 94 netif_err(efx, probe, efx->net_dev, 95 "Unable to register driver with MCPU\n"); 96 return rc; 97 } 98 if (already_attached) 99 /* Not a fatal error */ 100 netif_err(efx, probe, efx->net_dev, 101 "Host already registered with MCPU\n"); 102 103 return 0; 104 } 105 106 void efx_mcdi_fini(struct efx_nic *efx) 107 { 108 if (!efx->mcdi) 109 return; 110 111 BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); 112 113 /* Relinquish the device (back to the BMC, if this is a LOM) */ 114 efx_mcdi_drv_attach(efx, false, NULL); 115 116 kfree(efx->mcdi); 117 } 118 119 static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, 120 const efx_dword_t *inbuf, size_t inlen) 121 { 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 efx_dword_t hdr[2]; 124 size_t hdr_len; 125 u32 xflags, seqno; 126 127 BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); 128 129 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 130 spin_lock_bh(&mcdi->iface_lock); 131 ++mcdi->seqno; 132 spin_unlock_bh(&mcdi->iface_lock); 133 134 seqno = mcdi->seqno & SEQ_MASK; 135 xflags = 0; 136 if (mcdi->mode == MCDI_MODE_EVENTS) 137 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 138 139 if (efx->type->mcdi_max_ver == 1) { 140 /* MCDI v1 */ 141 EFX_POPULATE_DWORD_7(hdr[0], 142 MCDI_HEADER_RESPONSE, 0, 143 MCDI_HEADER_RESYNC, 1, 144 MCDI_HEADER_CODE, cmd, 145 MCDI_HEADER_DATALEN, inlen, 146 MCDI_HEADER_SEQ, seqno, 147 MCDI_HEADER_XFLAGS, xflags, 148 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); 149 hdr_len = 4; 150 } else { 151 /* MCDI v2 */ 152 BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); 153 EFX_POPULATE_DWORD_7(hdr[0], 154 MCDI_HEADER_RESPONSE, 0, 155 MCDI_HEADER_RESYNC, 1, 156 MCDI_HEADER_CODE, MC_CMD_V2_EXTN, 157 MCDI_HEADER_DATALEN, 0, 158 MCDI_HEADER_SEQ, seqno, 159 MCDI_HEADER_XFLAGS, xflags, 160 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); 161 EFX_POPULATE_DWORD_2(hdr[1], 162 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, 163 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); 164 hdr_len = 8; 165 } 166 167 efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); 168 169 mcdi->new_epoch = false; 170 } 171 172 static int efx_mcdi_errno(unsigned int mcdi_err) 173 { 174 switch (mcdi_err) { 175 case 0: 176 return 0; 177 #define TRANSLATE_ERROR(name) \ 178 case MC_CMD_ERR_ ## name: \ 179 return -name; 180 TRANSLATE_ERROR(EPERM); 181 TRANSLATE_ERROR(ENOENT); 182 TRANSLATE_ERROR(EINTR); 183 TRANSLATE_ERROR(EAGAIN); 184 TRANSLATE_ERROR(EACCES); 185 TRANSLATE_ERROR(EBUSY); 186 TRANSLATE_ERROR(EINVAL); 187 TRANSLATE_ERROR(EDEADLK); 188 TRANSLATE_ERROR(ENOSYS); 189 TRANSLATE_ERROR(ETIME); 190 TRANSLATE_ERROR(EALREADY); 191 TRANSLATE_ERROR(ENOSPC); 192 #undef TRANSLATE_ERROR 193 case MC_CMD_ERR_ALLOC_FAIL: 194 return -ENOBUFS; 195 case MC_CMD_ERR_MAC_EXIST: 196 return -EADDRINUSE; 197 default: 198 return -EPROTO; 199 } 200 } 201 202 static void efx_mcdi_read_response_header(struct efx_nic *efx) 203 { 204 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 205 unsigned int respseq, respcmd, error; 206 efx_dword_t hdr; 207 208 efx->type->mcdi_read_response(efx, &hdr, 0, 4); 209 respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); 210 respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); 211 error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); 212 213 if (respcmd != MC_CMD_V2_EXTN) { 214 mcdi->resp_hdr_len = 4; 215 mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); 216 } else { 217 efx->type->mcdi_read_response(efx, &hdr, 4, 4); 218 mcdi->resp_hdr_len = 8; 219 mcdi->resp_data_len = 220 EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 221 } 222 223 if (error && mcdi->resp_data_len == 0) { 224 netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); 225 mcdi->resprc = -EIO; 226 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { 227 netif_err(efx, hw, efx->net_dev, 228 "MC response mismatch tx seq 0x%x rx seq 0x%x\n", 229 respseq, mcdi->seqno); 230 mcdi->resprc = -EIO; 231 } else if (error) { 232 efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); 233 mcdi->resprc = 234 efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); 235 } else { 236 mcdi->resprc = 0; 237 } 238 } 239 240 static int efx_mcdi_poll(struct efx_nic *efx) 241 { 242 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 243 unsigned long time, finish; 244 unsigned int spins; 245 int rc; 246 247 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 248 rc = efx_mcdi_poll_reboot(efx); 249 if (rc) { 250 spin_lock_bh(&mcdi->iface_lock); 251 mcdi->resprc = rc; 252 mcdi->resp_hdr_len = 0; 253 mcdi->resp_data_len = 0; 254 spin_unlock_bh(&mcdi->iface_lock); 255 return 0; 256 } 257 258 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, 259 * because generally mcdi responses are fast. After that, back off 260 * and poll once a jiffy (approximately) 261 */ 262 spins = TICK_USEC; 263 finish = jiffies + MCDI_RPC_TIMEOUT; 264 265 while (1) { 266 if (spins != 0) { 267 --spins; 268 udelay(1); 269 } else { 270 schedule_timeout_uninterruptible(1); 271 } 272 273 time = jiffies; 274 275 rmb(); 276 if (efx->type->mcdi_poll_response(efx)) 277 break; 278 279 if (time_after(time, finish)) 280 return -ETIMEDOUT; 281 } 282 283 spin_lock_bh(&mcdi->iface_lock); 284 efx_mcdi_read_response_header(efx); 285 spin_unlock_bh(&mcdi->iface_lock); 286 287 /* Return rc=0 like wait_event_timeout() */ 288 return 0; 289 } 290 291 /* Test and clear MC-rebooted flag for this port/function; reset 292 * software state as necessary. 293 */ 294 int efx_mcdi_poll_reboot(struct efx_nic *efx) 295 { 296 if (!efx->mcdi) 297 return 0; 298 299 return efx->type->mcdi_poll_reboot(efx); 300 } 301 302 static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) 303 { 304 return cmpxchg(&mcdi->state, 305 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == 306 MCDI_STATE_QUIESCENT; 307 } 308 309 static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) 310 { 311 /* Wait until the interface becomes QUIESCENT and we win the race 312 * to mark it RUNNING_SYNC. 313 */ 314 wait_event(mcdi->wq, 315 cmpxchg(&mcdi->state, 316 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == 317 MCDI_STATE_QUIESCENT); 318 } 319 320 static int efx_mcdi_await_completion(struct efx_nic *efx) 321 { 322 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 323 324 if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, 325 MCDI_RPC_TIMEOUT) == 0) 326 return -ETIMEDOUT; 327 328 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 329 * In which case, poll for completions directly. If efx_mcdi_ev_cpl() 330 * completed the request first, then we'll just end up completing the 331 * request again, which is safe. 332 * 333 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which 334 * wait_event_timeout() implicitly provides. 335 */ 336 if (mcdi->mode == MCDI_MODE_POLL) 337 return efx_mcdi_poll(efx); 338 339 return 0; 340 } 341 342 /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the 343 * requester. Return whether this was done. Does not take any locks. 344 */ 345 static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) 346 { 347 if (cmpxchg(&mcdi->state, 348 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == 349 MCDI_STATE_RUNNING_SYNC) { 350 wake_up(&mcdi->wq); 351 return true; 352 } 353 354 return false; 355 } 356 357 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 358 { 359 if (mcdi->mode == MCDI_MODE_EVENTS) { 360 struct efx_mcdi_async_param *async; 361 struct efx_nic *efx = mcdi->efx; 362 363 /* Process the asynchronous request queue */ 364 spin_lock_bh(&mcdi->async_lock); 365 async = list_first_entry_or_null( 366 &mcdi->async_list, struct efx_mcdi_async_param, list); 367 if (async) { 368 mcdi->state = MCDI_STATE_RUNNING_ASYNC; 369 efx_mcdi_send_request(efx, async->cmd, 370 (const efx_dword_t *)(async + 1), 371 async->inlen); 372 mod_timer(&mcdi->async_timer, 373 jiffies + MCDI_RPC_TIMEOUT); 374 } 375 spin_unlock_bh(&mcdi->async_lock); 376 377 if (async) 378 return; 379 } 380 381 mcdi->state = MCDI_STATE_QUIESCENT; 382 wake_up(&mcdi->wq); 383 } 384 385 /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the 386 * asynchronous completion function, and release the interface. 387 * Return whether this was done. Must be called in bh-disabled 388 * context. Will take iface_lock and async_lock. 389 */ 390 static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) 391 { 392 struct efx_nic *efx = mcdi->efx; 393 struct efx_mcdi_async_param *async; 394 size_t hdr_len, data_len; 395 efx_dword_t *outbuf; 396 int rc; 397 398 if (cmpxchg(&mcdi->state, 399 MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != 400 MCDI_STATE_RUNNING_ASYNC) 401 return false; 402 403 spin_lock(&mcdi->iface_lock); 404 if (timeout) { 405 /* Ensure that if the completion event arrives later, 406 * the seqno check in efx_mcdi_ev_cpl() will fail 407 */ 408 ++mcdi->seqno; 409 ++mcdi->credits; 410 rc = -ETIMEDOUT; 411 hdr_len = 0; 412 data_len = 0; 413 } else { 414 rc = mcdi->resprc; 415 hdr_len = mcdi->resp_hdr_len; 416 data_len = mcdi->resp_data_len; 417 } 418 spin_unlock(&mcdi->iface_lock); 419 420 /* Stop the timer. In case the timer function is running, we 421 * must wait for it to return so that there is no possibility 422 * of it aborting the next request. 423 */ 424 if (!timeout) 425 del_timer_sync(&mcdi->async_timer); 426 427 spin_lock(&mcdi->async_lock); 428 async = list_first_entry(&mcdi->async_list, 429 struct efx_mcdi_async_param, list); 430 list_del(&async->list); 431 spin_unlock(&mcdi->async_lock); 432 433 outbuf = (efx_dword_t *)(async + 1); 434 efx->type->mcdi_read_response(efx, outbuf, hdr_len, 435 min(async->outlen, data_len)); 436 async->complete(efx, async->cookie, rc, outbuf, data_len); 437 kfree(async); 438 439 efx_mcdi_release(mcdi); 440 441 return true; 442 } 443 444 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 445 unsigned int datalen, unsigned int mcdi_err) 446 { 447 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 448 bool wake = false; 449 450 spin_lock(&mcdi->iface_lock); 451 452 if ((seqno ^ mcdi->seqno) & SEQ_MASK) { 453 if (mcdi->credits) 454 /* The request has been cancelled */ 455 --mcdi->credits; 456 else 457 netif_err(efx, hw, efx->net_dev, 458 "MC response mismatch tx seq 0x%x rx " 459 "seq 0x%x\n", seqno, mcdi->seqno); 460 } else { 461 if (efx->type->mcdi_max_ver >= 2) { 462 /* MCDI v2 responses don't fit in an event */ 463 efx_mcdi_read_response_header(efx); 464 } else { 465 mcdi->resprc = efx_mcdi_errno(mcdi_err); 466 mcdi->resp_hdr_len = 4; 467 mcdi->resp_data_len = datalen; 468 } 469 470 wake = true; 471 } 472 473 spin_unlock(&mcdi->iface_lock); 474 475 if (wake) { 476 if (!efx_mcdi_complete_async(mcdi, false)) 477 (void) efx_mcdi_complete_sync(mcdi); 478 479 /* If the interface isn't RUNNING_ASYNC or 480 * RUNNING_SYNC then we've received a duplicate 481 * completion after we've already transitioned back to 482 * QUIESCENT. [A subsequent invocation would increment 483 * seqno, so would have failed the seqno check]. 484 */ 485 } 486 } 487 488 static void efx_mcdi_timeout_async(unsigned long context) 489 { 490 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context; 491 492 efx_mcdi_complete_async(mcdi, true); 493 } 494 495 static int 496 efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) 497 { 498 if (efx->type->mcdi_max_ver < 0 || 499 (efx->type->mcdi_max_ver < 2 && 500 cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) 501 return -EINVAL; 502 503 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || 504 (efx->type->mcdi_max_ver < 2 && 505 inlen > MCDI_CTL_SDU_LEN_MAX_V1)) 506 return -EMSGSIZE; 507 508 return 0; 509 } 510 511 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 512 const efx_dword_t *inbuf, size_t inlen, 513 efx_dword_t *outbuf, size_t outlen, 514 size_t *outlen_actual) 515 { 516 int rc; 517 518 rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); 519 if (rc) 520 return rc; 521 return efx_mcdi_rpc_finish(efx, cmd, inlen, 522 outbuf, outlen, outlen_actual); 523 } 524 525 int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, 526 const efx_dword_t *inbuf, size_t inlen) 527 { 528 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 529 int rc; 530 531 rc = efx_mcdi_check_supported(efx, cmd, inlen); 532 if (rc) 533 return rc; 534 535 efx_mcdi_acquire_sync(mcdi); 536 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 537 return 0; 538 } 539 540 /** 541 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously 542 * @efx: NIC through which to issue the command 543 * @cmd: Command type number 544 * @inbuf: Command parameters 545 * @inlen: Length of command parameters, in bytes 546 * @outlen: Length to allocate for response buffer, in bytes 547 * @complete: Function to be called on completion or cancellation. 548 * @cookie: Arbitrary value to be passed to @complete. 549 * 550 * This function does not sleep and therefore may be called in atomic 551 * context. It will fail if event queues are disabled or if MCDI 552 * event completions have been disabled due to an error. 553 * 554 * If it succeeds, the @complete function will be called exactly once 555 * in atomic context, when one of the following occurs: 556 * (a) the completion event is received (in NAPI context) 557 * (b) event queues are disabled (in the process that disables them) 558 * (c) the request times-out (in timer context) 559 */ 560 int 561 efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, 562 const efx_dword_t *inbuf, size_t inlen, size_t outlen, 563 efx_mcdi_async_completer *complete, unsigned long cookie) 564 { 565 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 566 struct efx_mcdi_async_param *async; 567 int rc; 568 569 rc = efx_mcdi_check_supported(efx, cmd, inlen); 570 if (rc) 571 return rc; 572 573 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), 574 GFP_ATOMIC); 575 if (!async) 576 return -ENOMEM; 577 578 async->cmd = cmd; 579 async->inlen = inlen; 580 async->outlen = outlen; 581 async->complete = complete; 582 async->cookie = cookie; 583 memcpy(async + 1, inbuf, inlen); 584 585 spin_lock_bh(&mcdi->async_lock); 586 587 if (mcdi->mode == MCDI_MODE_EVENTS) { 588 list_add_tail(&async->list, &mcdi->async_list); 589 590 /* If this is at the front of the queue, try to start it 591 * immediately 592 */ 593 if (mcdi->async_list.next == &async->list && 594 efx_mcdi_acquire_async(mcdi)) { 595 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 596 mod_timer(&mcdi->async_timer, 597 jiffies + MCDI_RPC_TIMEOUT); 598 } 599 } else { 600 kfree(async); 601 rc = -ENETDOWN; 602 } 603 604 spin_unlock_bh(&mcdi->async_lock); 605 606 return rc; 607 } 608 609 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 610 efx_dword_t *outbuf, size_t outlen, 611 size_t *outlen_actual) 612 { 613 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 614 int rc; 615 616 if (mcdi->mode == MCDI_MODE_POLL) 617 rc = efx_mcdi_poll(efx); 618 else 619 rc = efx_mcdi_await_completion(efx); 620 621 if (rc != 0) { 622 /* Close the race with efx_mcdi_ev_cpl() executing just too late 623 * and completing a request we've just cancelled, by ensuring 624 * that the seqno check therein fails. 625 */ 626 spin_lock_bh(&mcdi->iface_lock); 627 ++mcdi->seqno; 628 ++mcdi->credits; 629 spin_unlock_bh(&mcdi->iface_lock); 630 631 netif_err(efx, hw, efx->net_dev, 632 "MC command 0x%x inlen %d mode %d timed out\n", 633 cmd, (int)inlen, mcdi->mode); 634 } else { 635 size_t hdr_len, data_len; 636 637 /* At the very least we need a memory barrier here to ensure 638 * we pick up changes from efx_mcdi_ev_cpl(). Protect against 639 * a spurious efx_mcdi_ev_cpl() running concurrently by 640 * acquiring the iface_lock. */ 641 spin_lock_bh(&mcdi->iface_lock); 642 rc = mcdi->resprc; 643 hdr_len = mcdi->resp_hdr_len; 644 data_len = mcdi->resp_data_len; 645 spin_unlock_bh(&mcdi->iface_lock); 646 647 BUG_ON(rc > 0); 648 649 if (rc == 0) { 650 efx->type->mcdi_read_response(efx, outbuf, hdr_len, 651 min(outlen, data_len)); 652 if (outlen_actual != NULL) 653 *outlen_actual = data_len; 654 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 655 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 656 else if (rc == -EIO || rc == -EINTR) { 657 netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", 658 -rc); 659 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 660 } else 661 netif_dbg(efx, hw, efx->net_dev, 662 "MC command 0x%x inlen %d failed rc=%d\n", 663 cmd, (int)inlen, -rc); 664 665 if (rc == -EIO || rc == -EINTR) { 666 msleep(MCDI_STATUS_SLEEP_MS); 667 efx_mcdi_poll_reboot(efx); 668 mcdi->new_epoch = true; 669 } 670 } 671 672 efx_mcdi_release(mcdi); 673 return rc; 674 } 675 676 /* Switch to polled MCDI completions. This can be called in various 677 * error conditions with various locks held, so it must be lockless. 678 * Caller is responsible for flushing asynchronous requests later. 679 */ 680 void efx_mcdi_mode_poll(struct efx_nic *efx) 681 { 682 struct efx_mcdi_iface *mcdi; 683 684 if (!efx->mcdi) 685 return; 686 687 mcdi = efx_mcdi(efx); 688 if (mcdi->mode == MCDI_MODE_POLL) 689 return; 690 691 /* We can switch from event completion to polled completion, because 692 * mcdi requests are always completed in shared memory. We do this by 693 * switching the mode to POLL'd then completing the request. 694 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 695 * 696 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 697 * which efx_mcdi_complete_sync() provides for us. 698 */ 699 mcdi->mode = MCDI_MODE_POLL; 700 701 efx_mcdi_complete_sync(mcdi); 702 } 703 704 /* Flush any running or queued asynchronous requests, after event processing 705 * is stopped 706 */ 707 void efx_mcdi_flush_async(struct efx_nic *efx) 708 { 709 struct efx_mcdi_async_param *async, *next; 710 struct efx_mcdi_iface *mcdi; 711 712 if (!efx->mcdi) 713 return; 714 715 mcdi = efx_mcdi(efx); 716 717 /* We must be in polling mode so no more requests can be queued */ 718 BUG_ON(mcdi->mode != MCDI_MODE_POLL); 719 720 del_timer_sync(&mcdi->async_timer); 721 722 /* If a request is still running, make sure we give the MC 723 * time to complete it so that the response won't overwrite our 724 * next request. 725 */ 726 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) { 727 efx_mcdi_poll(efx); 728 mcdi->state = MCDI_STATE_QUIESCENT; 729 } 730 731 /* Nothing else will access the async list now, so it is safe 732 * to walk it without holding async_lock. If we hold it while 733 * calling a completer then lockdep may warn that we have 734 * acquired locks in the wrong order. 735 */ 736 list_for_each_entry_safe(async, next, &mcdi->async_list, list) { 737 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); 738 list_del(&async->list); 739 kfree(async); 740 } 741 } 742 743 void efx_mcdi_mode_event(struct efx_nic *efx) 744 { 745 struct efx_mcdi_iface *mcdi; 746 747 if (!efx->mcdi) 748 return; 749 750 mcdi = efx_mcdi(efx); 751 752 if (mcdi->mode == MCDI_MODE_EVENTS) 753 return; 754 755 /* We can't switch from polled to event completion in the middle of a 756 * request, because the completion method is specified in the request. 757 * So acquire the interface to serialise the requestors. We don't need 758 * to acquire the iface_lock to change the mode here, but we do need a 759 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 760 * efx_mcdi_acquire() provides. 761 */ 762 efx_mcdi_acquire_sync(mcdi); 763 mcdi->mode = MCDI_MODE_EVENTS; 764 efx_mcdi_release(mcdi); 765 } 766 767 static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) 768 { 769 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 770 771 /* If there is an outstanding MCDI request, it has been terminated 772 * either by a BADASSERT or REBOOT event. If the mcdi interface is 773 * in polled mode, then do nothing because the MC reboot handler will 774 * set the header correctly. However, if the mcdi interface is waiting 775 * for a CMDDONE event it won't receive it [and since all MCDI events 776 * are sent to the same queue, we can't be racing with 777 * efx_mcdi_ev_cpl()] 778 * 779 * If there is an outstanding asynchronous request, we can't 780 * complete it now (efx_mcdi_complete() would deadlock). The 781 * reset process will take care of this. 782 * 783 * There's a race here with efx_mcdi_send_request(), because 784 * we might receive a REBOOT event *before* the request has 785 * been copied out. In polled mode (during startup) this is 786 * irrelevant, because efx_mcdi_complete_sync() is ignored. In 787 * event mode, this condition is just an edge-case of 788 * receiving a REBOOT event after posting the MCDI 789 * request. Did the mc reboot before or after the copyout? The 790 * best we can do always is just return failure. 791 */ 792 spin_lock(&mcdi->iface_lock); 793 if (efx_mcdi_complete_sync(mcdi)) { 794 if (mcdi->mode == MCDI_MODE_EVENTS) { 795 mcdi->resprc = rc; 796 mcdi->resp_hdr_len = 0; 797 mcdi->resp_data_len = 0; 798 ++mcdi->credits; 799 } 800 } else { 801 int count; 802 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 805 if (efx_mcdi_poll_reboot(efx)) 806 break; 807 udelay(MCDI_STATUS_DELAY_US); 808 } 809 mcdi->new_epoch = true; 810 811 /* Nobody was waiting for an MCDI request, so trigger a reset */ 812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 813 } 814 815 spin_unlock(&mcdi->iface_lock); 816 } 817 818 /* Called from falcon_process_eventq for MCDI events */ 819 void efx_mcdi_process_event(struct efx_channel *channel, 820 efx_qword_t *event) 821 { 822 struct efx_nic *efx = channel->efx; 823 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); 824 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); 825 826 switch (code) { 827 case MCDI_EVENT_CODE_BADSSERT: 828 netif_err(efx, hw, efx->net_dev, 829 "MC watchdog or assertion failure at 0x%x\n", data); 830 efx_mcdi_ev_death(efx, -EINTR); 831 break; 832 833 case MCDI_EVENT_CODE_PMNOTICE: 834 netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); 835 break; 836 837 case MCDI_EVENT_CODE_CMDDONE: 838 efx_mcdi_ev_cpl(efx, 839 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), 840 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), 841 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); 842 break; 843 844 case MCDI_EVENT_CODE_LINKCHANGE: 845 efx_mcdi_process_link_change(efx, event); 846 break; 847 case MCDI_EVENT_CODE_SENSOREVT: 848 efx_mcdi_sensor_event(efx, event); 849 break; 850 case MCDI_EVENT_CODE_SCHEDERR: 851 netif_info(efx, hw, efx->net_dev, 852 "MC Scheduler error address=0x%x\n", data); 853 break; 854 case MCDI_EVENT_CODE_REBOOT: 855 case MCDI_EVENT_CODE_MC_REBOOT: 856 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 857 efx_mcdi_ev_death(efx, -EIO); 858 break; 859 case MCDI_EVENT_CODE_MAC_STATS_DMA: 860 /* MAC stats are gather lazily. We can ignore this. */ 861 break; 862 case MCDI_EVENT_CODE_FLR: 863 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 864 break; 865 case MCDI_EVENT_CODE_PTP_RX: 866 case MCDI_EVENT_CODE_PTP_FAULT: 867 case MCDI_EVENT_CODE_PTP_PPS: 868 efx_ptp_event(efx, event); 869 break; 870 case MCDI_EVENT_CODE_TX_FLUSH: 871 case MCDI_EVENT_CODE_RX_FLUSH: 872 /* Two flush events will be sent: one to the same event 873 * queue as completions, and one to event queue 0. 874 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER 875 * flag will be set, and we should ignore the event 876 * because we want to wait for all completions. 877 */ 878 BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != 879 MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); 880 if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) 881 efx_ef10_handle_drain_event(efx); 882 break; 883 case MCDI_EVENT_CODE_TX_ERR: 884 case MCDI_EVENT_CODE_RX_ERR: 885 netif_err(efx, hw, efx->net_dev, 886 "%s DMA error (event: "EFX_QWORD_FMT")\n", 887 code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", 888 EFX_QWORD_VAL(*event)); 889 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 890 break; 891 default: 892 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 893 code); 894 } 895 } 896 897 /************************************************************************** 898 * 899 * Specific request functions 900 * 901 ************************************************************************** 902 */ 903 904 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 905 { 906 MCDI_DECLARE_BUF(outbuf, 907 max(MC_CMD_GET_VERSION_OUT_LEN, 908 MC_CMD_GET_CAPABILITIES_OUT_LEN)); 909 size_t outlength; 910 const __le16 *ver_words; 911 size_t offset; 912 int rc; 913 914 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 915 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 916 outbuf, sizeof(outbuf), &outlength); 917 if (rc) 918 goto fail; 919 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 920 rc = -EIO; 921 goto fail; 922 } 923 924 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 925 offset = snprintf(buf, len, "%u.%u.%u.%u", 926 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 927 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 928 929 /* EF10 may have multiple datapath firmware variants within a 930 * single version. Report which variants are running. 931 */ 932 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { 933 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 934 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 935 outbuf, sizeof(outbuf), &outlength); 936 if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN) 937 offset += snprintf( 938 buf + offset, len - offset, " rx? tx?"); 939 else 940 offset += snprintf( 941 buf + offset, len - offset, " rx%x tx%x", 942 MCDI_WORD(outbuf, 943 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID), 944 MCDI_WORD(outbuf, 945 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID)); 946 947 /* It's theoretically possible for the string to exceed 31 948 * characters, though in practice the first three version 949 * components are short enough that this doesn't happen. 950 */ 951 if (WARN_ON(offset >= len)) 952 buf[0] = 0; 953 } 954 955 return; 956 957 fail: 958 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 959 buf[0] = 0; 960 } 961 962 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 963 bool *was_attached) 964 { 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); 967 size_t outlen; 968 int rc; 969 970 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 971 driver_operating ? 1 : 0); 972 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 973 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); 974 975 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 976 outbuf, sizeof(outbuf), &outlen); 977 if (rc) 978 goto fail; 979 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { 980 rc = -EIO; 981 goto fail; 982 } 983 984 /* We currently assume we have control of the external link 985 * and are completely trusted by firmware. Abort probing 986 * if that's not true for this function. 987 */ 988 if (driver_operating && 989 outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN && 990 (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) & 991 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | 992 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != 993 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | 994 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { 995 netif_err(efx, probe, efx->net_dev, 996 "This driver version only supports one function per port\n"); 997 return -ENODEV; 998 } 999 1000 if (was_attached != NULL) 1001 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 1002 return 0; 1003 1004 fail: 1005 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1006 return rc; 1007 } 1008 1009 int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 1010 u16 *fw_subtype_list, u32 *capabilities) 1011 { 1012 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); 1013 size_t outlen, i; 1014 int port_num = efx_port_num(efx); 1015 int rc; 1016 1017 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 1018 1019 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 1020 outbuf, sizeof(outbuf), &outlen); 1021 if (rc) 1022 goto fail; 1023 1024 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 1025 rc = -EIO; 1026 goto fail; 1027 } 1028 1029 if (mac_address) 1030 memcpy(mac_address, 1031 port_num ? 1032 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : 1033 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), 1034 ETH_ALEN); 1035 if (fw_subtype_list) { 1036 for (i = 0; 1037 i < MCDI_VAR_ARRAY_LEN(outlen, 1038 GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); 1039 i++) 1040 fw_subtype_list[i] = MCDI_ARRAY_WORD( 1041 outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); 1042 for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) 1043 fw_subtype_list[i] = 0; 1044 } 1045 if (capabilities) { 1046 if (port_num) 1047 *capabilities = MCDI_DWORD(outbuf, 1048 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 1049 else 1050 *capabilities = MCDI_DWORD(outbuf, 1051 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 1052 } 1053 1054 return 0; 1055 1056 fail: 1057 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", 1058 __func__, rc, (int)outlen); 1059 1060 return rc; 1061 } 1062 1063 int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 1064 { 1065 MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); 1066 u32 dest = 0; 1067 int rc; 1068 1069 if (uart) 1070 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; 1071 if (evq) 1072 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; 1073 1074 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); 1075 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); 1076 1077 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); 1078 1079 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), 1080 NULL, 0, NULL); 1081 if (rc) 1082 goto fail; 1083 1084 return 0; 1085 1086 fail: 1087 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1088 return rc; 1089 } 1090 1091 int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 1092 { 1093 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); 1094 size_t outlen; 1095 int rc; 1096 1097 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); 1098 1099 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, 1100 outbuf, sizeof(outbuf), &outlen); 1101 if (rc) 1102 goto fail; 1103 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { 1104 rc = -EIO; 1105 goto fail; 1106 } 1107 1108 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 1109 return 0; 1110 1111 fail: 1112 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1113 __func__, rc); 1114 return rc; 1115 } 1116 1117 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 1118 size_t *size_out, size_t *erase_size_out, 1119 bool *protected_out) 1120 { 1121 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); 1122 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); 1123 size_t outlen; 1124 int rc; 1125 1126 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); 1127 1128 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), 1129 outbuf, sizeof(outbuf), &outlen); 1130 if (rc) 1131 goto fail; 1132 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { 1133 rc = -EIO; 1134 goto fail; 1135 } 1136 1137 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 1138 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 1139 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 1140 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); 1141 return 0; 1142 1143 fail: 1144 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1145 return rc; 1146 } 1147 1148 static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 1149 { 1150 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); 1151 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); 1152 int rc; 1153 1154 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 1155 1156 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), 1157 outbuf, sizeof(outbuf), NULL); 1158 if (rc) 1159 return rc; 1160 1161 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { 1162 case MC_CMD_NVRAM_TEST_PASS: 1163 case MC_CMD_NVRAM_TEST_NOTSUPP: 1164 return 0; 1165 default: 1166 return -EIO; 1167 } 1168 } 1169 1170 int efx_mcdi_nvram_test_all(struct efx_nic *efx) 1171 { 1172 u32 nvram_types; 1173 unsigned int type; 1174 int rc; 1175 1176 rc = efx_mcdi_nvram_types(efx, &nvram_types); 1177 if (rc) 1178 goto fail1; 1179 1180 type = 0; 1181 while (nvram_types != 0) { 1182 if (nvram_types & 1) { 1183 rc = efx_mcdi_nvram_test(efx, type); 1184 if (rc) 1185 goto fail2; 1186 } 1187 type++; 1188 nvram_types >>= 1; 1189 } 1190 1191 return 0; 1192 1193 fail2: 1194 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", 1195 __func__, type); 1196 fail1: 1197 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1198 return rc; 1199 } 1200 1201 static int efx_mcdi_read_assertion(struct efx_nic *efx) 1202 { 1203 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); 1204 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); 1205 unsigned int flags, index; 1206 const char *reason; 1207 size_t outlen; 1208 int retry; 1209 int rc; 1210 1211 /* Attempt to read any stored assertion state before we reboot 1212 * the mcfw out of the assertion handler. Retry twice, once 1213 * because a boot-time assertion might cause this command to fail 1214 * with EINTR. And once again because GET_ASSERTS can race with 1215 * MC_CMD_REBOOT running on the other port. */ 1216 retry = 2; 1217 do { 1218 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); 1219 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 1220 inbuf, MC_CMD_GET_ASSERTS_IN_LEN, 1221 outbuf, sizeof(outbuf), &outlen); 1222 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 1223 1224 if (rc) 1225 return rc; 1226 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 1227 return -EIO; 1228 1229 /* Print out any recorded assertion state */ 1230 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 1231 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 1232 return 0; 1233 1234 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 1235 ? "system-level assertion" 1236 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 1237 ? "thread-level assertion" 1238 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 1239 ? "watchdog reset" 1240 : "unknown assertion"; 1241 netif_err(efx, hw, efx->net_dev, 1242 "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 1243 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1244 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1245 1246 /* Print out the registers */ 1247 for (index = 0; 1248 index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; 1249 index++) 1250 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", 1251 1 + index, 1252 MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, 1253 index)); 1254 1255 return 0; 1256 } 1257 1258 static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1259 { 1260 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); 1261 1262 /* If the MC is running debug firmware, it might now be 1263 * waiting for a debugger to attach, but we just want it to 1264 * reboot. We set a flag that makes the command a no-op if it 1265 * has already done so. We don't know what return code to 1266 * expect (0 or -EIO), so ignore it. 1267 */ 1268 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1269 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 1270 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); 1271 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, 1272 NULL, 0, NULL); 1273 } 1274 1275 int efx_mcdi_handle_assertion(struct efx_nic *efx) 1276 { 1277 int rc; 1278 1279 rc = efx_mcdi_read_assertion(efx); 1280 if (rc) 1281 return rc; 1282 1283 efx_mcdi_exit_assertion(efx); 1284 1285 return 0; 1286 } 1287 1288 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1289 { 1290 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); 1291 int rc; 1292 1293 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1294 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); 1295 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); 1296 1297 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); 1298 1299 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); 1300 1301 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), 1302 NULL, 0, NULL); 1303 if (rc) 1304 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1305 __func__, rc); 1306 } 1307 1308 static int efx_mcdi_reset_port(struct efx_nic *efx) 1309 { 1310 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1311 if (rc) 1312 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1313 __func__, rc); 1314 return rc; 1315 } 1316 1317 static int efx_mcdi_reset_mc(struct efx_nic *efx) 1318 { 1319 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); 1320 int rc; 1321 1322 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1323 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); 1324 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), 1325 NULL, 0, NULL); 1326 /* White is black, and up is down */ 1327 if (rc == -EIO) 1328 return 0; 1329 if (rc == 0) 1330 rc = -EIO; 1331 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1332 return rc; 1333 } 1334 1335 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) 1336 { 1337 return RESET_TYPE_RECOVER_OR_ALL; 1338 } 1339 1340 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) 1341 { 1342 int rc; 1343 1344 /* Recover from a failed assertion pre-reset */ 1345 rc = efx_mcdi_handle_assertion(efx); 1346 if (rc) 1347 return rc; 1348 1349 if (method == RESET_TYPE_WORLD) 1350 return efx_mcdi_reset_mc(efx); 1351 else 1352 return efx_mcdi_reset_port(efx); 1353 } 1354 1355 static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1356 const u8 *mac, int *id_out) 1357 { 1358 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); 1359 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); 1360 size_t outlen; 1361 int rc; 1362 1363 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1364 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1365 MC_CMD_FILTER_MODE_SIMPLE); 1366 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1367 1368 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1369 outbuf, sizeof(outbuf), &outlen); 1370 if (rc) 1371 goto fail; 1372 1373 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1374 rc = -EIO; 1375 goto fail; 1376 } 1377 1378 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); 1379 1380 return 0; 1381 1382 fail: 1383 *id_out = -1; 1384 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1385 return rc; 1386 1387 } 1388 1389 1390 int 1391 efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) 1392 { 1393 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); 1394 } 1395 1396 1397 int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1398 { 1399 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); 1400 size_t outlen; 1401 int rc; 1402 1403 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, 1404 outbuf, sizeof(outbuf), &outlen); 1405 if (rc) 1406 goto fail; 1407 1408 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1409 rc = -EIO; 1410 goto fail; 1411 } 1412 1413 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); 1414 1415 return 0; 1416 1417 fail: 1418 *id_out = -1; 1419 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1420 return rc; 1421 } 1422 1423 1424 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1425 { 1426 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); 1427 int rc; 1428 1429 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1430 1431 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), 1432 NULL, 0, NULL); 1433 if (rc) 1434 goto fail; 1435 1436 return 0; 1437 1438 fail: 1439 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1440 return rc; 1441 } 1442 1443 int efx_mcdi_flush_rxqs(struct efx_nic *efx) 1444 { 1445 struct efx_channel *channel; 1446 struct efx_rx_queue *rx_queue; 1447 MCDI_DECLARE_BUF(inbuf, 1448 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); 1449 int rc, count; 1450 1451 BUILD_BUG_ON(EFX_MAX_CHANNELS > 1452 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 1453 1454 count = 0; 1455 efx_for_each_channel(channel, efx) { 1456 efx_for_each_channel_rx_queue(rx_queue, channel) { 1457 if (rx_queue->flush_pending) { 1458 rx_queue->flush_pending = false; 1459 atomic_dec(&efx->rxq_flush_pending); 1460 MCDI_SET_ARRAY_DWORD( 1461 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, 1462 count, efx_rx_queue_index(rx_queue)); 1463 count++; 1464 } 1465 } 1466 } 1467 1468 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, 1469 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); 1470 WARN_ON(rc < 0); 1471 1472 return rc; 1473 } 1474 1475 int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1476 { 1477 int rc; 1478 1479 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); 1480 if (rc) 1481 goto fail; 1482 1483 return 0; 1484 1485 fail: 1486 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1487 return rc; 1488 } 1489 1490 int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) 1491 { 1492 MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); 1493 1494 BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); 1495 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); 1496 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); 1497 return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), 1498 NULL, 0, NULL); 1499 } 1500 1501 #ifdef CONFIG_SFC_MTD 1502 1503 #define EFX_MCDI_NVRAM_LEN_MAX 128 1504 1505 static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) 1506 { 1507 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); 1508 int rc; 1509 1510 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); 1511 1512 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); 1513 1514 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), 1515 NULL, 0, NULL); 1516 if (rc) 1517 goto fail; 1518 1519 return 0; 1520 1521 fail: 1522 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1523 return rc; 1524 } 1525 1526 static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, 1527 loff_t offset, u8 *buffer, size_t length) 1528 { 1529 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); 1530 MCDI_DECLARE_BUF(outbuf, 1531 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); 1532 size_t outlen; 1533 int rc; 1534 1535 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); 1536 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); 1537 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); 1538 1539 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), 1540 outbuf, sizeof(outbuf), &outlen); 1541 if (rc) 1542 goto fail; 1543 1544 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); 1545 return 0; 1546 1547 fail: 1548 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1549 return rc; 1550 } 1551 1552 static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 1553 loff_t offset, const u8 *buffer, size_t length) 1554 { 1555 MCDI_DECLARE_BUF(inbuf, 1556 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); 1557 int rc; 1558 1559 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 1560 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); 1561 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); 1562 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); 1563 1564 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 1565 1566 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, 1567 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), 1568 NULL, 0, NULL); 1569 if (rc) 1570 goto fail; 1571 1572 return 0; 1573 1574 fail: 1575 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1576 return rc; 1577 } 1578 1579 static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 1580 loff_t offset, size_t length) 1581 { 1582 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); 1583 int rc; 1584 1585 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); 1586 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); 1587 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); 1588 1589 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); 1590 1591 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), 1592 NULL, 0, NULL); 1593 if (rc) 1594 goto fail; 1595 1596 return 0; 1597 1598 fail: 1599 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1600 return rc; 1601 } 1602 1603 static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) 1604 { 1605 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); 1606 int rc; 1607 1608 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); 1609 1610 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); 1611 1612 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), 1613 NULL, 0, NULL); 1614 if (rc) 1615 goto fail; 1616 1617 return 0; 1618 1619 fail: 1620 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1621 return rc; 1622 } 1623 1624 int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, 1625 size_t len, size_t *retlen, u8 *buffer) 1626 { 1627 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1628 struct efx_nic *efx = mtd->priv; 1629 loff_t offset = start; 1630 loff_t end = min_t(loff_t, start + len, mtd->size); 1631 size_t chunk; 1632 int rc = 0; 1633 1634 while (offset < end) { 1635 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 1636 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, 1637 buffer, chunk); 1638 if (rc) 1639 goto out; 1640 offset += chunk; 1641 buffer += chunk; 1642 } 1643 out: 1644 *retlen = offset - start; 1645 return rc; 1646 } 1647 1648 int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) 1649 { 1650 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1651 struct efx_nic *efx = mtd->priv; 1652 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); 1653 loff_t end = min_t(loff_t, start + len, mtd->size); 1654 size_t chunk = part->common.mtd.erasesize; 1655 int rc = 0; 1656 1657 if (!part->updating) { 1658 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); 1659 if (rc) 1660 goto out; 1661 part->updating = true; 1662 } 1663 1664 /* The MCDI interface can in fact do multiple erase blocks at once; 1665 * but erasing may be slow, so we make multiple calls here to avoid 1666 * tripping the MCDI RPC timeout. */ 1667 while (offset < end) { 1668 rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, 1669 chunk); 1670 if (rc) 1671 goto out; 1672 offset += chunk; 1673 } 1674 out: 1675 return rc; 1676 } 1677 1678 int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, 1679 size_t len, size_t *retlen, const u8 *buffer) 1680 { 1681 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1682 struct efx_nic *efx = mtd->priv; 1683 loff_t offset = start; 1684 loff_t end = min_t(loff_t, start + len, mtd->size); 1685 size_t chunk; 1686 int rc = 0; 1687 1688 if (!part->updating) { 1689 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); 1690 if (rc) 1691 goto out; 1692 part->updating = true; 1693 } 1694 1695 while (offset < end) { 1696 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 1697 rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, 1698 buffer, chunk); 1699 if (rc) 1700 goto out; 1701 offset += chunk; 1702 buffer += chunk; 1703 } 1704 out: 1705 *retlen = offset - start; 1706 return rc; 1707 } 1708 1709 int efx_mcdi_mtd_sync(struct mtd_info *mtd) 1710 { 1711 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1712 struct efx_nic *efx = mtd->priv; 1713 int rc = 0; 1714 1715 if (part->updating) { 1716 part->updating = false; 1717 rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); 1718 } 1719 1720 return rc; 1721 } 1722 1723 void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) 1724 { 1725 struct efx_mcdi_mtd_partition *mcdi_part = 1726 container_of(part, struct efx_mcdi_mtd_partition, common); 1727 struct efx_nic *efx = part->mtd.priv; 1728 1729 snprintf(part->name, sizeof(part->name), "%s %s:%02x", 1730 efx->name, part->type_name, mcdi_part->fw_subtype); 1731 } 1732 1733 #endif /* CONFIG_SFC_MTD */ 1734