1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2008-2013 Solarflare Communications Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published 7 * by the Free Software Foundation, incorporated herein by reference. 8 */ 9 10 #include <linux/delay.h> 11 #include <asm/cmpxchg.h> 12 #include "net_driver.h" 13 #include "nic.h" 14 #include "io.h" 15 #include "farch_regs.h" 16 #include "mcdi_pcol.h" 17 #include "phy.h" 18 19 /************************************************************************** 20 * 21 * Management-Controller-to-Driver Interface 22 * 23 ************************************************************************** 24 */ 25 26 #define MCDI_RPC_TIMEOUT (10 * HZ) 27 28 /* A reboot/assertion causes the MCDI status word to be set after the 29 * command word is set or a REBOOT event is sent. If we notice a reboot 30 * via these mechanisms then wait 20ms for the status word to be set. 31 */ 32 #define MCDI_STATUS_DELAY_US 100 33 #define MCDI_STATUS_DELAY_COUNT 200 34 #define MCDI_STATUS_SLEEP_MS \ 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 36 37 #define SEQ_MASK \ 38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 39 40 struct efx_mcdi_async_param { 41 struct list_head list; 42 unsigned int cmd; 43 size_t inlen; 44 size_t outlen; 45 efx_mcdi_async_completer *complete; 46 unsigned long cookie; 47 /* followed by request/response buffer */ 48 }; 49 50 static void efx_mcdi_timeout_async(unsigned long context); 51 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 52 bool *was_attached_out); 53 54 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 55 { 56 EFX_BUG_ON_PARANOID(!efx->mcdi); 57 return &efx->mcdi->iface; 58 } 59 60 int efx_mcdi_init(struct efx_nic *efx) 61 { 62 struct efx_mcdi_iface *mcdi; 63 bool already_attached; 64 int rc; 65 66 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); 67 if (!efx->mcdi) 68 return -ENOMEM; 69 70 mcdi = efx_mcdi(efx); 71 mcdi->efx = efx; 72 init_waitqueue_head(&mcdi->wq); 73 spin_lock_init(&mcdi->iface_lock); 74 mcdi->state = MCDI_STATE_QUIESCENT; 75 mcdi->mode = MCDI_MODE_POLL; 76 spin_lock_init(&mcdi->async_lock); 77 INIT_LIST_HEAD(&mcdi->async_list); 78 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, 79 (unsigned long)mcdi); 80 81 (void) efx_mcdi_poll_reboot(efx); 82 mcdi->new_epoch = true; 83 84 /* Recover from a failed assertion before probing */ 85 rc = efx_mcdi_handle_assertion(efx); 86 if (rc) 87 return rc; 88 89 /* Let the MC (and BMC, if this is a LOM) know that the driver 90 * is loaded. We should do this before we reset the NIC. 91 */ 92 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 93 if (rc) { 94 netif_err(efx, probe, efx->net_dev, 95 "Unable to register driver with MCPU\n"); 96 return rc; 97 } 98 if (already_attached) 99 /* Not a fatal error */ 100 netif_err(efx, probe, efx->net_dev, 101 "Host already registered with MCPU\n"); 102 103 return 0; 104 } 105 106 void efx_mcdi_fini(struct efx_nic *efx) 107 { 108 if (!efx->mcdi) 109 return; 110 111 BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); 112 113 /* Relinquish the device (back to the BMC, if this is a LOM) */ 114 efx_mcdi_drv_attach(efx, false, NULL); 115 116 kfree(efx->mcdi); 117 } 118 119 static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, 120 const efx_dword_t *inbuf, size_t inlen) 121 { 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 efx_dword_t hdr[2]; 124 size_t hdr_len; 125 u32 xflags, seqno; 126 127 BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); 128 129 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 130 spin_lock_bh(&mcdi->iface_lock); 131 ++mcdi->seqno; 132 spin_unlock_bh(&mcdi->iface_lock); 133 134 seqno = mcdi->seqno & SEQ_MASK; 135 xflags = 0; 136 if (mcdi->mode == MCDI_MODE_EVENTS) 137 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 138 139 if (efx->type->mcdi_max_ver == 1) { 140 /* MCDI v1 */ 141 EFX_POPULATE_DWORD_7(hdr[0], 142 MCDI_HEADER_RESPONSE, 0, 143 MCDI_HEADER_RESYNC, 1, 144 MCDI_HEADER_CODE, cmd, 145 MCDI_HEADER_DATALEN, inlen, 146 MCDI_HEADER_SEQ, seqno, 147 MCDI_HEADER_XFLAGS, xflags, 148 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); 149 hdr_len = 4; 150 } else { 151 /* MCDI v2 */ 152 BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); 153 EFX_POPULATE_DWORD_7(hdr[0], 154 MCDI_HEADER_RESPONSE, 0, 155 MCDI_HEADER_RESYNC, 1, 156 MCDI_HEADER_CODE, MC_CMD_V2_EXTN, 157 MCDI_HEADER_DATALEN, 0, 158 MCDI_HEADER_SEQ, seqno, 159 MCDI_HEADER_XFLAGS, xflags, 160 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); 161 EFX_POPULATE_DWORD_2(hdr[1], 162 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, 163 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); 164 hdr_len = 8; 165 } 166 167 efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); 168 169 mcdi->new_epoch = false; 170 } 171 172 static int efx_mcdi_errno(unsigned int mcdi_err) 173 { 174 switch (mcdi_err) { 175 case 0: 176 return 0; 177 #define TRANSLATE_ERROR(name) \ 178 case MC_CMD_ERR_ ## name: \ 179 return -name; 180 TRANSLATE_ERROR(EPERM); 181 TRANSLATE_ERROR(ENOENT); 182 TRANSLATE_ERROR(EINTR); 183 TRANSLATE_ERROR(EAGAIN); 184 TRANSLATE_ERROR(EACCES); 185 TRANSLATE_ERROR(EBUSY); 186 TRANSLATE_ERROR(EINVAL); 187 TRANSLATE_ERROR(EDEADLK); 188 TRANSLATE_ERROR(ENOSYS); 189 TRANSLATE_ERROR(ETIME); 190 TRANSLATE_ERROR(EALREADY); 191 TRANSLATE_ERROR(ENOSPC); 192 #undef TRANSLATE_ERROR 193 case MC_CMD_ERR_ALLOC_FAIL: 194 return -ENOBUFS; 195 case MC_CMD_ERR_MAC_EXIST: 196 return -EADDRINUSE; 197 default: 198 return -EPROTO; 199 } 200 } 201 202 static void efx_mcdi_read_response_header(struct efx_nic *efx) 203 { 204 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 205 unsigned int respseq, respcmd, error; 206 efx_dword_t hdr; 207 208 efx->type->mcdi_read_response(efx, &hdr, 0, 4); 209 respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); 210 respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); 211 error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); 212 213 if (respcmd != MC_CMD_V2_EXTN) { 214 mcdi->resp_hdr_len = 4; 215 mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); 216 } else { 217 efx->type->mcdi_read_response(efx, &hdr, 4, 4); 218 mcdi->resp_hdr_len = 8; 219 mcdi->resp_data_len = 220 EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 221 } 222 223 if (error && mcdi->resp_data_len == 0) { 224 netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); 225 mcdi->resprc = -EIO; 226 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { 227 netif_err(efx, hw, efx->net_dev, 228 "MC response mismatch tx seq 0x%x rx seq 0x%x\n", 229 respseq, mcdi->seqno); 230 mcdi->resprc = -EIO; 231 } else if (error) { 232 efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); 233 mcdi->resprc = 234 efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); 235 } else { 236 mcdi->resprc = 0; 237 } 238 } 239 240 static int efx_mcdi_poll(struct efx_nic *efx) 241 { 242 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 243 unsigned long time, finish; 244 unsigned int spins; 245 int rc; 246 247 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 248 rc = efx_mcdi_poll_reboot(efx); 249 if (rc) { 250 spin_lock_bh(&mcdi->iface_lock); 251 mcdi->resprc = rc; 252 mcdi->resp_hdr_len = 0; 253 mcdi->resp_data_len = 0; 254 spin_unlock_bh(&mcdi->iface_lock); 255 return 0; 256 } 257 258 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, 259 * because generally mcdi responses are fast. After that, back off 260 * and poll once a jiffy (approximately) 261 */ 262 spins = TICK_USEC; 263 finish = jiffies + MCDI_RPC_TIMEOUT; 264 265 while (1) { 266 if (spins != 0) { 267 --spins; 268 udelay(1); 269 } else { 270 schedule_timeout_uninterruptible(1); 271 } 272 273 time = jiffies; 274 275 rmb(); 276 if (efx->type->mcdi_poll_response(efx)) 277 break; 278 279 if (time_after(time, finish)) 280 return -ETIMEDOUT; 281 } 282 283 spin_lock_bh(&mcdi->iface_lock); 284 efx_mcdi_read_response_header(efx); 285 spin_unlock_bh(&mcdi->iface_lock); 286 287 /* Return rc=0 like wait_event_timeout() */ 288 return 0; 289 } 290 291 /* Test and clear MC-rebooted flag for this port/function; reset 292 * software state as necessary. 293 */ 294 int efx_mcdi_poll_reboot(struct efx_nic *efx) 295 { 296 if (!efx->mcdi) 297 return 0; 298 299 return efx->type->mcdi_poll_reboot(efx); 300 } 301 302 static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) 303 { 304 return cmpxchg(&mcdi->state, 305 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == 306 MCDI_STATE_QUIESCENT; 307 } 308 309 static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) 310 { 311 /* Wait until the interface becomes QUIESCENT and we win the race 312 * to mark it RUNNING_SYNC. 313 */ 314 wait_event(mcdi->wq, 315 cmpxchg(&mcdi->state, 316 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == 317 MCDI_STATE_QUIESCENT); 318 } 319 320 static int efx_mcdi_await_completion(struct efx_nic *efx) 321 { 322 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 323 324 if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, 325 MCDI_RPC_TIMEOUT) == 0) 326 return -ETIMEDOUT; 327 328 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 329 * In which case, poll for completions directly. If efx_mcdi_ev_cpl() 330 * completed the request first, then we'll just end up completing the 331 * request again, which is safe. 332 * 333 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which 334 * wait_event_timeout() implicitly provides. 335 */ 336 if (mcdi->mode == MCDI_MODE_POLL) 337 return efx_mcdi_poll(efx); 338 339 return 0; 340 } 341 342 /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the 343 * requester. Return whether this was done. Does not take any locks. 344 */ 345 static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) 346 { 347 if (cmpxchg(&mcdi->state, 348 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == 349 MCDI_STATE_RUNNING_SYNC) { 350 wake_up(&mcdi->wq); 351 return true; 352 } 353 354 return false; 355 } 356 357 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 358 { 359 if (mcdi->mode == MCDI_MODE_EVENTS) { 360 struct efx_mcdi_async_param *async; 361 struct efx_nic *efx = mcdi->efx; 362 363 /* Process the asynchronous request queue */ 364 spin_lock_bh(&mcdi->async_lock); 365 async = list_first_entry_or_null( 366 &mcdi->async_list, struct efx_mcdi_async_param, list); 367 if (async) { 368 mcdi->state = MCDI_STATE_RUNNING_ASYNC; 369 efx_mcdi_send_request(efx, async->cmd, 370 (const efx_dword_t *)(async + 1), 371 async->inlen); 372 mod_timer(&mcdi->async_timer, 373 jiffies + MCDI_RPC_TIMEOUT); 374 } 375 spin_unlock_bh(&mcdi->async_lock); 376 377 if (async) 378 return; 379 } 380 381 mcdi->state = MCDI_STATE_QUIESCENT; 382 wake_up(&mcdi->wq); 383 } 384 385 /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the 386 * asynchronous completion function, and release the interface. 387 * Return whether this was done. Must be called in bh-disabled 388 * context. Will take iface_lock and async_lock. 389 */ 390 static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) 391 { 392 struct efx_nic *efx = mcdi->efx; 393 struct efx_mcdi_async_param *async; 394 size_t hdr_len, data_len; 395 efx_dword_t *outbuf; 396 int rc; 397 398 if (cmpxchg(&mcdi->state, 399 MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != 400 MCDI_STATE_RUNNING_ASYNC) 401 return false; 402 403 spin_lock(&mcdi->iface_lock); 404 if (timeout) { 405 /* Ensure that if the completion event arrives later, 406 * the seqno check in efx_mcdi_ev_cpl() will fail 407 */ 408 ++mcdi->seqno; 409 ++mcdi->credits; 410 rc = -ETIMEDOUT; 411 hdr_len = 0; 412 data_len = 0; 413 } else { 414 rc = mcdi->resprc; 415 hdr_len = mcdi->resp_hdr_len; 416 data_len = mcdi->resp_data_len; 417 } 418 spin_unlock(&mcdi->iface_lock); 419 420 /* Stop the timer. In case the timer function is running, we 421 * must wait for it to return so that there is no possibility 422 * of it aborting the next request. 423 */ 424 if (!timeout) 425 del_timer_sync(&mcdi->async_timer); 426 427 spin_lock(&mcdi->async_lock); 428 async = list_first_entry(&mcdi->async_list, 429 struct efx_mcdi_async_param, list); 430 list_del(&async->list); 431 spin_unlock(&mcdi->async_lock); 432 433 outbuf = (efx_dword_t *)(async + 1); 434 efx->type->mcdi_read_response(efx, outbuf, hdr_len, 435 min(async->outlen, data_len)); 436 async->complete(efx, async->cookie, rc, outbuf, data_len); 437 kfree(async); 438 439 efx_mcdi_release(mcdi); 440 441 return true; 442 } 443 444 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 445 unsigned int datalen, unsigned int mcdi_err) 446 { 447 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 448 bool wake = false; 449 450 spin_lock(&mcdi->iface_lock); 451 452 if ((seqno ^ mcdi->seqno) & SEQ_MASK) { 453 if (mcdi->credits) 454 /* The request has been cancelled */ 455 --mcdi->credits; 456 else 457 netif_err(efx, hw, efx->net_dev, 458 "MC response mismatch tx seq 0x%x rx " 459 "seq 0x%x\n", seqno, mcdi->seqno); 460 } else { 461 if (efx->type->mcdi_max_ver >= 2) { 462 /* MCDI v2 responses don't fit in an event */ 463 efx_mcdi_read_response_header(efx); 464 } else { 465 mcdi->resprc = efx_mcdi_errno(mcdi_err); 466 mcdi->resp_hdr_len = 4; 467 mcdi->resp_data_len = datalen; 468 } 469 470 wake = true; 471 } 472 473 spin_unlock(&mcdi->iface_lock); 474 475 if (wake) { 476 if (!efx_mcdi_complete_async(mcdi, false)) 477 (void) efx_mcdi_complete_sync(mcdi); 478 479 /* If the interface isn't RUNNING_ASYNC or 480 * RUNNING_SYNC then we've received a duplicate 481 * completion after we've already transitioned back to 482 * QUIESCENT. [A subsequent invocation would increment 483 * seqno, so would have failed the seqno check]. 484 */ 485 } 486 } 487 488 static void efx_mcdi_timeout_async(unsigned long context) 489 { 490 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context; 491 492 efx_mcdi_complete_async(mcdi, true); 493 } 494 495 static int 496 efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) 497 { 498 if (efx->type->mcdi_max_ver < 0 || 499 (efx->type->mcdi_max_ver < 2 && 500 cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) 501 return -EINVAL; 502 503 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || 504 (efx->type->mcdi_max_ver < 2 && 505 inlen > MCDI_CTL_SDU_LEN_MAX_V1)) 506 return -EMSGSIZE; 507 508 return 0; 509 } 510 511 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 512 const efx_dword_t *inbuf, size_t inlen, 513 efx_dword_t *outbuf, size_t outlen, 514 size_t *outlen_actual) 515 { 516 int rc; 517 518 rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); 519 if (rc) 520 return rc; 521 return efx_mcdi_rpc_finish(efx, cmd, inlen, 522 outbuf, outlen, outlen_actual); 523 } 524 525 int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, 526 const efx_dword_t *inbuf, size_t inlen) 527 { 528 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 529 int rc; 530 531 rc = efx_mcdi_check_supported(efx, cmd, inlen); 532 if (rc) 533 return rc; 534 535 efx_mcdi_acquire_sync(mcdi); 536 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 537 return 0; 538 } 539 540 /** 541 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously 542 * @efx: NIC through which to issue the command 543 * @cmd: Command type number 544 * @inbuf: Command parameters 545 * @inlen: Length of command parameters, in bytes 546 * @outlen: Length to allocate for response buffer, in bytes 547 * @complete: Function to be called on completion or cancellation. 548 * @cookie: Arbitrary value to be passed to @complete. 549 * 550 * This function does not sleep and therefore may be called in atomic 551 * context. It will fail if event queues are disabled or if MCDI 552 * event completions have been disabled due to an error. 553 * 554 * If it succeeds, the @complete function will be called exactly once 555 * in atomic context, when one of the following occurs: 556 * (a) the completion event is received (in NAPI context) 557 * (b) event queues are disabled (in the process that disables them) 558 * (c) the request times-out (in timer context) 559 */ 560 int 561 efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, 562 const efx_dword_t *inbuf, size_t inlen, size_t outlen, 563 efx_mcdi_async_completer *complete, unsigned long cookie) 564 { 565 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 566 struct efx_mcdi_async_param *async; 567 int rc; 568 569 rc = efx_mcdi_check_supported(efx, cmd, inlen); 570 if (rc) 571 return rc; 572 573 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), 574 GFP_ATOMIC); 575 if (!async) 576 return -ENOMEM; 577 578 async->cmd = cmd; 579 async->inlen = inlen; 580 async->outlen = outlen; 581 async->complete = complete; 582 async->cookie = cookie; 583 memcpy(async + 1, inbuf, inlen); 584 585 spin_lock_bh(&mcdi->async_lock); 586 587 if (mcdi->mode == MCDI_MODE_EVENTS) { 588 list_add_tail(&async->list, &mcdi->async_list); 589 590 /* If this is at the front of the queue, try to start it 591 * immediately 592 */ 593 if (mcdi->async_list.next == &async->list && 594 efx_mcdi_acquire_async(mcdi)) { 595 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 596 mod_timer(&mcdi->async_timer, 597 jiffies + MCDI_RPC_TIMEOUT); 598 } 599 } else { 600 kfree(async); 601 rc = -ENETDOWN; 602 } 603 604 spin_unlock_bh(&mcdi->async_lock); 605 606 return rc; 607 } 608 609 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 610 efx_dword_t *outbuf, size_t outlen, 611 size_t *outlen_actual) 612 { 613 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 614 int rc; 615 616 if (mcdi->mode == MCDI_MODE_POLL) 617 rc = efx_mcdi_poll(efx); 618 else 619 rc = efx_mcdi_await_completion(efx); 620 621 if (rc != 0) { 622 /* Close the race with efx_mcdi_ev_cpl() executing just too late 623 * and completing a request we've just cancelled, by ensuring 624 * that the seqno check therein fails. 625 */ 626 spin_lock_bh(&mcdi->iface_lock); 627 ++mcdi->seqno; 628 ++mcdi->credits; 629 spin_unlock_bh(&mcdi->iface_lock); 630 631 netif_err(efx, hw, efx->net_dev, 632 "MC command 0x%x inlen %d mode %d timed out\n", 633 cmd, (int)inlen, mcdi->mode); 634 } else { 635 size_t hdr_len, data_len; 636 637 /* At the very least we need a memory barrier here to ensure 638 * we pick up changes from efx_mcdi_ev_cpl(). Protect against 639 * a spurious efx_mcdi_ev_cpl() running concurrently by 640 * acquiring the iface_lock. */ 641 spin_lock_bh(&mcdi->iface_lock); 642 rc = mcdi->resprc; 643 hdr_len = mcdi->resp_hdr_len; 644 data_len = mcdi->resp_data_len; 645 spin_unlock_bh(&mcdi->iface_lock); 646 647 BUG_ON(rc > 0); 648 649 if (rc == 0) { 650 efx->type->mcdi_read_response(efx, outbuf, hdr_len, 651 min(outlen, data_len)); 652 if (outlen_actual != NULL) 653 *outlen_actual = data_len; 654 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 655 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 656 else if (rc == -EIO || rc == -EINTR) { 657 netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", 658 -rc); 659 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 660 } else 661 netif_dbg(efx, hw, efx->net_dev, 662 "MC command 0x%x inlen %d failed rc=%d\n", 663 cmd, (int)inlen, -rc); 664 665 if (rc == -EIO || rc == -EINTR) { 666 msleep(MCDI_STATUS_SLEEP_MS); 667 efx_mcdi_poll_reboot(efx); 668 mcdi->new_epoch = true; 669 } 670 } 671 672 efx_mcdi_release(mcdi); 673 return rc; 674 } 675 676 /* Switch to polled MCDI completions. This can be called in various 677 * error conditions with various locks held, so it must be lockless. 678 * Caller is responsible for flushing asynchronous requests later. 679 */ 680 void efx_mcdi_mode_poll(struct efx_nic *efx) 681 { 682 struct efx_mcdi_iface *mcdi; 683 684 if (!efx->mcdi) 685 return; 686 687 mcdi = efx_mcdi(efx); 688 if (mcdi->mode == MCDI_MODE_POLL) 689 return; 690 691 /* We can switch from event completion to polled completion, because 692 * mcdi requests are always completed in shared memory. We do this by 693 * switching the mode to POLL'd then completing the request. 694 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 695 * 696 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 697 * which efx_mcdi_complete_sync() provides for us. 698 */ 699 mcdi->mode = MCDI_MODE_POLL; 700 701 efx_mcdi_complete_sync(mcdi); 702 } 703 704 /* Flush any running or queued asynchronous requests, after event processing 705 * is stopped 706 */ 707 void efx_mcdi_flush_async(struct efx_nic *efx) 708 { 709 struct efx_mcdi_async_param *async, *next; 710 struct efx_mcdi_iface *mcdi; 711 712 if (!efx->mcdi) 713 return; 714 715 mcdi = efx_mcdi(efx); 716 717 /* We must be in polling mode so no more requests can be queued */ 718 BUG_ON(mcdi->mode != MCDI_MODE_POLL); 719 720 del_timer_sync(&mcdi->async_timer); 721 722 /* If a request is still running, make sure we give the MC 723 * time to complete it so that the response won't overwrite our 724 * next request. 725 */ 726 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) { 727 efx_mcdi_poll(efx); 728 mcdi->state = MCDI_STATE_QUIESCENT; 729 } 730 731 /* Nothing else will access the async list now, so it is safe 732 * to walk it without holding async_lock. If we hold it while 733 * calling a completer then lockdep may warn that we have 734 * acquired locks in the wrong order. 735 */ 736 list_for_each_entry_safe(async, next, &mcdi->async_list, list) { 737 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); 738 list_del(&async->list); 739 kfree(async); 740 } 741 } 742 743 void efx_mcdi_mode_event(struct efx_nic *efx) 744 { 745 struct efx_mcdi_iface *mcdi; 746 747 if (!efx->mcdi) 748 return; 749 750 mcdi = efx_mcdi(efx); 751 752 if (mcdi->mode == MCDI_MODE_EVENTS) 753 return; 754 755 /* We can't switch from polled to event completion in the middle of a 756 * request, because the completion method is specified in the request. 757 * So acquire the interface to serialise the requestors. We don't need 758 * to acquire the iface_lock to change the mode here, but we do need a 759 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 760 * efx_mcdi_acquire() provides. 761 */ 762 efx_mcdi_acquire_sync(mcdi); 763 mcdi->mode = MCDI_MODE_EVENTS; 764 efx_mcdi_release(mcdi); 765 } 766 767 static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) 768 { 769 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 770 771 /* If there is an outstanding MCDI request, it has been terminated 772 * either by a BADASSERT or REBOOT event. If the mcdi interface is 773 * in polled mode, then do nothing because the MC reboot handler will 774 * set the header correctly. However, if the mcdi interface is waiting 775 * for a CMDDONE event it won't receive it [and since all MCDI events 776 * are sent to the same queue, we can't be racing with 777 * efx_mcdi_ev_cpl()] 778 * 779 * If there is an outstanding asynchronous request, we can't 780 * complete it now (efx_mcdi_complete() would deadlock). The 781 * reset process will take care of this. 782 * 783 * There's a race here with efx_mcdi_send_request(), because 784 * we might receive a REBOOT event *before* the request has 785 * been copied out. In polled mode (during startup) this is 786 * irrelevant, because efx_mcdi_complete_sync() is ignored. In 787 * event mode, this condition is just an edge-case of 788 * receiving a REBOOT event after posting the MCDI 789 * request. Did the mc reboot before or after the copyout? The 790 * best we can do always is just return failure. 791 */ 792 spin_lock(&mcdi->iface_lock); 793 if (efx_mcdi_complete_sync(mcdi)) { 794 if (mcdi->mode == MCDI_MODE_EVENTS) { 795 mcdi->resprc = rc; 796 mcdi->resp_hdr_len = 0; 797 mcdi->resp_data_len = 0; 798 ++mcdi->credits; 799 } 800 } else { 801 int count; 802 803 /* Nobody was waiting for an MCDI request, so trigger a reset */ 804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 805 806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 808 if (efx_mcdi_poll_reboot(efx)) 809 break; 810 udelay(MCDI_STATUS_DELAY_US); 811 } 812 mcdi->new_epoch = true; 813 } 814 815 spin_unlock(&mcdi->iface_lock); 816 } 817 818 /* Called from falcon_process_eventq for MCDI events */ 819 void efx_mcdi_process_event(struct efx_channel *channel, 820 efx_qword_t *event) 821 { 822 struct efx_nic *efx = channel->efx; 823 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); 824 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); 825 826 switch (code) { 827 case MCDI_EVENT_CODE_BADSSERT: 828 netif_err(efx, hw, efx->net_dev, 829 "MC watchdog or assertion failure at 0x%x\n", data); 830 efx_mcdi_ev_death(efx, -EINTR); 831 break; 832 833 case MCDI_EVENT_CODE_PMNOTICE: 834 netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); 835 break; 836 837 case MCDI_EVENT_CODE_CMDDONE: 838 efx_mcdi_ev_cpl(efx, 839 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), 840 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), 841 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); 842 break; 843 844 case MCDI_EVENT_CODE_LINKCHANGE: 845 efx_mcdi_process_link_change(efx, event); 846 break; 847 case MCDI_EVENT_CODE_SENSOREVT: 848 efx_mcdi_sensor_event(efx, event); 849 break; 850 case MCDI_EVENT_CODE_SCHEDERR: 851 netif_info(efx, hw, efx->net_dev, 852 "MC Scheduler error address=0x%x\n", data); 853 break; 854 case MCDI_EVENT_CODE_REBOOT: 855 case MCDI_EVENT_CODE_MC_REBOOT: 856 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 857 efx_mcdi_ev_death(efx, -EIO); 858 break; 859 case MCDI_EVENT_CODE_MAC_STATS_DMA: 860 /* MAC stats are gather lazily. We can ignore this. */ 861 break; 862 case MCDI_EVENT_CODE_FLR: 863 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 864 break; 865 case MCDI_EVENT_CODE_PTP_RX: 866 case MCDI_EVENT_CODE_PTP_FAULT: 867 case MCDI_EVENT_CODE_PTP_PPS: 868 efx_ptp_event(efx, event); 869 break; 870 case MCDI_EVENT_CODE_TX_FLUSH: 871 case MCDI_EVENT_CODE_RX_FLUSH: 872 /* Two flush events will be sent: one to the same event 873 * queue as completions, and one to event queue 0. 874 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER 875 * flag will be set, and we should ignore the event 876 * because we want to wait for all completions. 877 */ 878 BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != 879 MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); 880 if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) 881 efx_ef10_handle_drain_event(efx); 882 break; 883 case MCDI_EVENT_CODE_TX_ERR: 884 case MCDI_EVENT_CODE_RX_ERR: 885 netif_err(efx, hw, efx->net_dev, 886 "%s DMA error (event: "EFX_QWORD_FMT")\n", 887 code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", 888 EFX_QWORD_VAL(*event)); 889 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 890 break; 891 default: 892 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 893 code); 894 } 895 } 896 897 /************************************************************************** 898 * 899 * Specific request functions 900 * 901 ************************************************************************** 902 */ 903 904 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 905 { 906 MCDI_DECLARE_BUF(outbuf, 907 max(MC_CMD_GET_VERSION_OUT_LEN, 908 MC_CMD_GET_CAPABILITIES_OUT_LEN)); 909 size_t outlength; 910 const __le16 *ver_words; 911 size_t offset; 912 int rc; 913 914 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 915 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 916 outbuf, sizeof(outbuf), &outlength); 917 if (rc) 918 goto fail; 919 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 920 rc = -EIO; 921 goto fail; 922 } 923 924 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 925 offset = snprintf(buf, len, "%u.%u.%u.%u", 926 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 927 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 928 929 /* EF10 may have multiple datapath firmware variants within a 930 * single version. Report which variants are running. 931 */ 932 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { 933 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 934 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 935 outbuf, sizeof(outbuf), &outlength); 936 if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN) 937 offset += snprintf( 938 buf + offset, len - offset, " rx? tx?"); 939 else 940 offset += snprintf( 941 buf + offset, len - offset, " rx%x tx%x", 942 MCDI_WORD(outbuf, 943 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID), 944 MCDI_WORD(outbuf, 945 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID)); 946 947 /* It's theoretically possible for the string to exceed 31 948 * characters, though in practice the first three version 949 * components are short enough that this doesn't happen. 950 */ 951 if (WARN_ON(offset >= len)) 952 buf[0] = 0; 953 } 954 955 return; 956 957 fail: 958 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 959 buf[0] = 0; 960 } 961 962 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 963 bool *was_attached) 964 { 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 967 size_t outlen; 968 int rc; 969 970 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 971 driver_operating ? 1 : 0); 972 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 973 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); 974 975 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 976 outbuf, sizeof(outbuf), &outlen); 977 if (rc) 978 goto fail; 979 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { 980 rc = -EIO; 981 goto fail; 982 } 983 984 if (was_attached != NULL) 985 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 986 return 0; 987 988 fail: 989 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 990 return rc; 991 } 992 993 int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 994 u16 *fw_subtype_list, u32 *capabilities) 995 { 996 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); 997 size_t outlen, i; 998 int port_num = efx_port_num(efx); 999 int rc; 1000 1001 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 1002 1003 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 1004 outbuf, sizeof(outbuf), &outlen); 1005 if (rc) 1006 goto fail; 1007 1008 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 1009 rc = -EIO; 1010 goto fail; 1011 } 1012 1013 if (mac_address) 1014 memcpy(mac_address, 1015 port_num ? 1016 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : 1017 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), 1018 ETH_ALEN); 1019 if (fw_subtype_list) { 1020 for (i = 0; 1021 i < MCDI_VAR_ARRAY_LEN(outlen, 1022 GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); 1023 i++) 1024 fw_subtype_list[i] = MCDI_ARRAY_WORD( 1025 outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); 1026 for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) 1027 fw_subtype_list[i] = 0; 1028 } 1029 if (capabilities) { 1030 if (port_num) 1031 *capabilities = MCDI_DWORD(outbuf, 1032 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 1033 else 1034 *capabilities = MCDI_DWORD(outbuf, 1035 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 1036 } 1037 1038 return 0; 1039 1040 fail: 1041 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", 1042 __func__, rc, (int)outlen); 1043 1044 return rc; 1045 } 1046 1047 int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 1048 { 1049 MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); 1050 u32 dest = 0; 1051 int rc; 1052 1053 if (uart) 1054 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; 1055 if (evq) 1056 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; 1057 1058 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); 1059 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); 1060 1061 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); 1062 1063 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), 1064 NULL, 0, NULL); 1065 if (rc) 1066 goto fail; 1067 1068 return 0; 1069 1070 fail: 1071 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1072 return rc; 1073 } 1074 1075 int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 1076 { 1077 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); 1078 size_t outlen; 1079 int rc; 1080 1081 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); 1082 1083 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, 1084 outbuf, sizeof(outbuf), &outlen); 1085 if (rc) 1086 goto fail; 1087 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { 1088 rc = -EIO; 1089 goto fail; 1090 } 1091 1092 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 1093 return 0; 1094 1095 fail: 1096 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1097 __func__, rc); 1098 return rc; 1099 } 1100 1101 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 1102 size_t *size_out, size_t *erase_size_out, 1103 bool *protected_out) 1104 { 1105 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); 1106 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); 1107 size_t outlen; 1108 int rc; 1109 1110 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); 1111 1112 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), 1113 outbuf, sizeof(outbuf), &outlen); 1114 if (rc) 1115 goto fail; 1116 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { 1117 rc = -EIO; 1118 goto fail; 1119 } 1120 1121 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 1122 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 1123 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 1124 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); 1125 return 0; 1126 1127 fail: 1128 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1129 return rc; 1130 } 1131 1132 static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 1133 { 1134 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); 1135 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); 1136 int rc; 1137 1138 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 1139 1140 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), 1141 outbuf, sizeof(outbuf), NULL); 1142 if (rc) 1143 return rc; 1144 1145 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { 1146 case MC_CMD_NVRAM_TEST_PASS: 1147 case MC_CMD_NVRAM_TEST_NOTSUPP: 1148 return 0; 1149 default: 1150 return -EIO; 1151 } 1152 } 1153 1154 int efx_mcdi_nvram_test_all(struct efx_nic *efx) 1155 { 1156 u32 nvram_types; 1157 unsigned int type; 1158 int rc; 1159 1160 rc = efx_mcdi_nvram_types(efx, &nvram_types); 1161 if (rc) 1162 goto fail1; 1163 1164 type = 0; 1165 while (nvram_types != 0) { 1166 if (nvram_types & 1) { 1167 rc = efx_mcdi_nvram_test(efx, type); 1168 if (rc) 1169 goto fail2; 1170 } 1171 type++; 1172 nvram_types >>= 1; 1173 } 1174 1175 return 0; 1176 1177 fail2: 1178 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", 1179 __func__, type); 1180 fail1: 1181 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1182 return rc; 1183 } 1184 1185 static int efx_mcdi_read_assertion(struct efx_nic *efx) 1186 { 1187 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); 1188 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); 1189 unsigned int flags, index; 1190 const char *reason; 1191 size_t outlen; 1192 int retry; 1193 int rc; 1194 1195 /* Attempt to read any stored assertion state before we reboot 1196 * the mcfw out of the assertion handler. Retry twice, once 1197 * because a boot-time assertion might cause this command to fail 1198 * with EINTR. And once again because GET_ASSERTS can race with 1199 * MC_CMD_REBOOT running on the other port. */ 1200 retry = 2; 1201 do { 1202 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); 1203 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 1204 inbuf, MC_CMD_GET_ASSERTS_IN_LEN, 1205 outbuf, sizeof(outbuf), &outlen); 1206 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 1207 1208 if (rc) 1209 return rc; 1210 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 1211 return -EIO; 1212 1213 /* Print out any recorded assertion state */ 1214 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 1215 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 1216 return 0; 1217 1218 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 1219 ? "system-level assertion" 1220 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 1221 ? "thread-level assertion" 1222 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 1223 ? "watchdog reset" 1224 : "unknown assertion"; 1225 netif_err(efx, hw, efx->net_dev, 1226 "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 1227 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1228 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1229 1230 /* Print out the registers */ 1231 for (index = 0; 1232 index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; 1233 index++) 1234 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", 1235 1 + index, 1236 MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, 1237 index)); 1238 1239 return 0; 1240 } 1241 1242 static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1243 { 1244 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); 1245 1246 /* If the MC is running debug firmware, it might now be 1247 * waiting for a debugger to attach, but we just want it to 1248 * reboot. We set a flag that makes the command a no-op if it 1249 * has already done so. We don't know what return code to 1250 * expect (0 or -EIO), so ignore it. 1251 */ 1252 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1253 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 1254 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); 1255 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, 1256 NULL, 0, NULL); 1257 } 1258 1259 int efx_mcdi_handle_assertion(struct efx_nic *efx) 1260 { 1261 int rc; 1262 1263 rc = efx_mcdi_read_assertion(efx); 1264 if (rc) 1265 return rc; 1266 1267 efx_mcdi_exit_assertion(efx); 1268 1269 return 0; 1270 } 1271 1272 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1273 { 1274 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); 1275 int rc; 1276 1277 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1278 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); 1279 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); 1280 1281 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); 1282 1283 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); 1284 1285 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), 1286 NULL, 0, NULL); 1287 if (rc) 1288 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1289 __func__, rc); 1290 } 1291 1292 static int efx_mcdi_reset_port(struct efx_nic *efx) 1293 { 1294 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1295 if (rc) 1296 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1297 __func__, rc); 1298 return rc; 1299 } 1300 1301 static int efx_mcdi_reset_mc(struct efx_nic *efx) 1302 { 1303 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); 1304 int rc; 1305 1306 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1307 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); 1308 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), 1309 NULL, 0, NULL); 1310 /* White is black, and up is down */ 1311 if (rc == -EIO) 1312 return 0; 1313 if (rc == 0) 1314 rc = -EIO; 1315 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1316 return rc; 1317 } 1318 1319 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) 1320 { 1321 return RESET_TYPE_RECOVER_OR_ALL; 1322 } 1323 1324 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) 1325 { 1326 int rc; 1327 1328 /* Recover from a failed assertion pre-reset */ 1329 rc = efx_mcdi_handle_assertion(efx); 1330 if (rc) 1331 return rc; 1332 1333 if (method == RESET_TYPE_WORLD) 1334 return efx_mcdi_reset_mc(efx); 1335 else 1336 return efx_mcdi_reset_port(efx); 1337 } 1338 1339 static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1340 const u8 *mac, int *id_out) 1341 { 1342 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); 1343 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); 1344 size_t outlen; 1345 int rc; 1346 1347 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1348 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1349 MC_CMD_FILTER_MODE_SIMPLE); 1350 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1351 1352 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1353 outbuf, sizeof(outbuf), &outlen); 1354 if (rc) 1355 goto fail; 1356 1357 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1358 rc = -EIO; 1359 goto fail; 1360 } 1361 1362 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); 1363 1364 return 0; 1365 1366 fail: 1367 *id_out = -1; 1368 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1369 return rc; 1370 1371 } 1372 1373 1374 int 1375 efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) 1376 { 1377 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); 1378 } 1379 1380 1381 int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1382 { 1383 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); 1384 size_t outlen; 1385 int rc; 1386 1387 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, 1388 outbuf, sizeof(outbuf), &outlen); 1389 if (rc) 1390 goto fail; 1391 1392 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1393 rc = -EIO; 1394 goto fail; 1395 } 1396 1397 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); 1398 1399 return 0; 1400 1401 fail: 1402 *id_out = -1; 1403 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1404 return rc; 1405 } 1406 1407 1408 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1409 { 1410 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); 1411 int rc; 1412 1413 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1414 1415 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), 1416 NULL, 0, NULL); 1417 if (rc) 1418 goto fail; 1419 1420 return 0; 1421 1422 fail: 1423 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1424 return rc; 1425 } 1426 1427 int efx_mcdi_flush_rxqs(struct efx_nic *efx) 1428 { 1429 struct efx_channel *channel; 1430 struct efx_rx_queue *rx_queue; 1431 MCDI_DECLARE_BUF(inbuf, 1432 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); 1433 int rc, count; 1434 1435 BUILD_BUG_ON(EFX_MAX_CHANNELS > 1436 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 1437 1438 count = 0; 1439 efx_for_each_channel(channel, efx) { 1440 efx_for_each_channel_rx_queue(rx_queue, channel) { 1441 if (rx_queue->flush_pending) { 1442 rx_queue->flush_pending = false; 1443 atomic_dec(&efx->rxq_flush_pending); 1444 MCDI_SET_ARRAY_DWORD( 1445 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, 1446 count, efx_rx_queue_index(rx_queue)); 1447 count++; 1448 } 1449 } 1450 } 1451 1452 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, 1453 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); 1454 WARN_ON(rc < 0); 1455 1456 return rc; 1457 } 1458 1459 int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1460 { 1461 int rc; 1462 1463 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); 1464 if (rc) 1465 goto fail; 1466 1467 return 0; 1468 1469 fail: 1470 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1471 return rc; 1472 } 1473 1474 int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) 1475 { 1476 MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); 1477 1478 BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); 1479 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); 1480 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); 1481 return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), 1482 NULL, 0, NULL); 1483 } 1484 1485 #ifdef CONFIG_SFC_MTD 1486 1487 #define EFX_MCDI_NVRAM_LEN_MAX 128 1488 1489 static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) 1490 { 1491 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); 1492 int rc; 1493 1494 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); 1495 1496 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); 1497 1498 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), 1499 NULL, 0, NULL); 1500 if (rc) 1501 goto fail; 1502 1503 return 0; 1504 1505 fail: 1506 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1507 return rc; 1508 } 1509 1510 static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, 1511 loff_t offset, u8 *buffer, size_t length) 1512 { 1513 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); 1514 MCDI_DECLARE_BUF(outbuf, 1515 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); 1516 size_t outlen; 1517 int rc; 1518 1519 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); 1520 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); 1521 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); 1522 1523 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), 1524 outbuf, sizeof(outbuf), &outlen); 1525 if (rc) 1526 goto fail; 1527 1528 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); 1529 return 0; 1530 1531 fail: 1532 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1533 return rc; 1534 } 1535 1536 static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 1537 loff_t offset, const u8 *buffer, size_t length) 1538 { 1539 MCDI_DECLARE_BUF(inbuf, 1540 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); 1541 int rc; 1542 1543 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 1544 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); 1545 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); 1546 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); 1547 1548 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 1549 1550 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, 1551 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), 1552 NULL, 0, NULL); 1553 if (rc) 1554 goto fail; 1555 1556 return 0; 1557 1558 fail: 1559 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1560 return rc; 1561 } 1562 1563 static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 1564 loff_t offset, size_t length) 1565 { 1566 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); 1567 int rc; 1568 1569 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); 1570 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); 1571 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); 1572 1573 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); 1574 1575 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), 1576 NULL, 0, NULL); 1577 if (rc) 1578 goto fail; 1579 1580 return 0; 1581 1582 fail: 1583 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1584 return rc; 1585 } 1586 1587 static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) 1588 { 1589 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); 1590 int rc; 1591 1592 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); 1593 1594 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); 1595 1596 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), 1597 NULL, 0, NULL); 1598 if (rc) 1599 goto fail; 1600 1601 return 0; 1602 1603 fail: 1604 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1605 return rc; 1606 } 1607 1608 int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, 1609 size_t len, size_t *retlen, u8 *buffer) 1610 { 1611 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1612 struct efx_nic *efx = mtd->priv; 1613 loff_t offset = start; 1614 loff_t end = min_t(loff_t, start + len, mtd->size); 1615 size_t chunk; 1616 int rc = 0; 1617 1618 while (offset < end) { 1619 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 1620 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, 1621 buffer, chunk); 1622 if (rc) 1623 goto out; 1624 offset += chunk; 1625 buffer += chunk; 1626 } 1627 out: 1628 *retlen = offset - start; 1629 return rc; 1630 } 1631 1632 int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) 1633 { 1634 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1635 struct efx_nic *efx = mtd->priv; 1636 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); 1637 loff_t end = min_t(loff_t, start + len, mtd->size); 1638 size_t chunk = part->common.mtd.erasesize; 1639 int rc = 0; 1640 1641 if (!part->updating) { 1642 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); 1643 if (rc) 1644 goto out; 1645 part->updating = true; 1646 } 1647 1648 /* The MCDI interface can in fact do multiple erase blocks at once; 1649 * but erasing may be slow, so we make multiple calls here to avoid 1650 * tripping the MCDI RPC timeout. */ 1651 while (offset < end) { 1652 rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, 1653 chunk); 1654 if (rc) 1655 goto out; 1656 offset += chunk; 1657 } 1658 out: 1659 return rc; 1660 } 1661 1662 int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, 1663 size_t len, size_t *retlen, const u8 *buffer) 1664 { 1665 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1666 struct efx_nic *efx = mtd->priv; 1667 loff_t offset = start; 1668 loff_t end = min_t(loff_t, start + len, mtd->size); 1669 size_t chunk; 1670 int rc = 0; 1671 1672 if (!part->updating) { 1673 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); 1674 if (rc) 1675 goto out; 1676 part->updating = true; 1677 } 1678 1679 while (offset < end) { 1680 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 1681 rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, 1682 buffer, chunk); 1683 if (rc) 1684 goto out; 1685 offset += chunk; 1686 buffer += chunk; 1687 } 1688 out: 1689 *retlen = offset - start; 1690 return rc; 1691 } 1692 1693 int efx_mcdi_mtd_sync(struct mtd_info *mtd) 1694 { 1695 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); 1696 struct efx_nic *efx = mtd->priv; 1697 int rc = 0; 1698 1699 if (part->updating) { 1700 part->updating = false; 1701 rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); 1702 } 1703 1704 return rc; 1705 } 1706 1707 void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) 1708 { 1709 struct efx_mcdi_mtd_partition *mcdi_part = 1710 container_of(part, struct efx_mcdi_mtd_partition, common); 1711 struct efx_nic *efx = part->mtd.priv; 1712 1713 snprintf(part->name, sizeof(part->name), "%s %s:%02x", 1714 efx->name, part->type_name, mcdi_part->fw_subtype); 1715 } 1716 1717 #endif /* CONFIG_SFC_MTD */ 1718