/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/atomic.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	bool quiet;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);

#ifdef CONFIG_SFC_MCDI_LOGGING
static bool mcdi_logging_default;
module_param(mcdi_logging_default, bool, 0644);
MODULE_PARM_DESC(mcdi_logging_default,
		 "Enable MCDI logging on newly-probed functions");
#endif

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc = -ENOMEM;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		goto fail;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
#ifdef CONFIG_SFC_MCDI_LOGGING
	/* consuming code assumes buffer is page-sized */
	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!mcdi->logging_buffer)
		goto fail1;
	mcdi->logging_enabled = mcdi_logging_default;
#endif
	init_waitqueue_head(&mcdi->wq);
	init_waitqueue_head(&mcdi->proxy_rx_wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
		    (unsigned long)mcdi);

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		goto fail2;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	if (efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		efx->primary = efx;

	return 0;
fail2:
#ifdef CONFIG_SFC_MCDI_LOGGING
	free_page((unsigned long)mcdi->logging_buffer);
fail1:
#endif
	kfree(efx->mcdi);
	efx->mcdi = NULL;
fail:
	return rc;
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);

#ifdef CONFIG_SFC_MCDI_LOGGING
	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
#endif

	kfree(efx->mcdi);
}
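
/* Note on runtime control of MCDI tracing: mcdi_logging_default above only
 * sets the initial state for functions probed after it changes.  Assuming
 * the driver is built as the "sfc" module with CONFIG_SFC_MCDI_LOGGING, a
 * sketch of toggling it from userspace (illustrative, not part of the
 * driver):
 *
 *	echo Y > /sys/module/sfc/parameters/mcdi_logging_default
 *
 * Requests and responses are then hex-dumped by efx_mcdi_send_request() and
 * efx_mcdi_read_response_header() below.
 */
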
static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		int bytes = 0;
		int i;
		/* Lengths should always be a whole number of dwords, so scream
		 * if they're not.
		 */
		WARN_ON_ONCE(hdr_len % 4);
		WARN_ON_ONCE(inlen % 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
					  " %08x", le32_to_cpu(hdr[i].u32[0]));

		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
					  " %08x", le32_to_cpu(inbuf[i].u32[0]));

		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
	}
#endif

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}
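
/* For reference, a sketch of the wire framing emitted above for an MCDI v2
 * request (field layout as defined in mcdi_pcol.h; informal, not a
 * normative description):
 *
 *	dword 0: RESPONSE=0 RESYNC=1 CODE=MC_CMD_V2_EXTN DATALEN=0
 *	         SEQ=seqno XFLAGS=(EVREQ in event mode) NOT_EPOCH=!new_epoch
 *	dword 1: EXTENDED_CMD=cmd ACTUAL_LEN=inlen
 *	then:    the request payload (inlen bytes, dword-aligned)
 *
 * An MCDI v1 request omits dword 1 and carries cmd/inlen directly in the
 * CODE/DATALEN fields of dword 0.
 */
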
static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ENOTSUP:
		return -EOPNOTSUPP;
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		size_t hdr_len, data_len;
		int bytes = 0;
		int i;

		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
		hdr_len = mcdi->resp_hdr_len / 4;
		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
		 * to dword size, and the MCDI buffer is always dword size
		 */
		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
					  " %08x", le32_to_cpu(hdr.u32[0]));
		}

		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr,
					mcdi->resp_hdr_len + (i * 4), 4);
			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
					  " %08x", le32_to_cpu(hdr.u32[0]));
		}

		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
	}
#endif

	mcdi->resprc_raw = 0;
	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
	} else {
		mcdi->resprc = 0;
	}
}

static bool efx_mcdi_poll_once(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	rmb();
	if (!efx->type->mcdi_poll_response(efx))
		return false;

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	return true;
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
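
/* A rough worked example of the cadence above: spins starts at TICK_USEC
 * (one USER_HZ tick expressed in microseconds, typically 10000), so the
 * loop busy-polls with udelay(1) for about 10ms before falling back to
 * sleeping one jiffy per check, up to MCDI_RPC_TIMEOUT (10 * HZ, i.e. 10
 * seconds) in total.
 */
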
/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_mode_poll() switched us back to polled
	 * completions.  In which case, poll for completions directly. If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len, err_len;
	efx_dword_t *outbuf;
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	if (!timeout && rc && !async->quiet) {
		err_len = min(sizeof(errbuf), data_len);
		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
					      sizeof(errbuf));
		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
				       err_len, rc);
	}

	if (async->complete)
		async->complete(efx, async->cookie, rc, outbuf,
				min(async->outlen, data_len));
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

static void efx_mcdi_timeout_async(unsigned long context)
{
	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;

	efx_mcdi_complete_async(mcdi, true);
}

static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
				      size_t hdr_len, size_t data_len,
				      u32 *proxy_handle)
{
	MCDI_DECLARE_BUF_ERR(testbuf);
	const size_t buflen = sizeof(testbuf);

	if (!proxy_handle || data_len < buflen)
		return false;

	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
		return true;
	}

	return false;
}

static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
				size_t inlen,
				efx_dword_t *outbuf, size_t outlen,
				size_t *outlen_actual, bool quiet,
				u32 *proxy_handle, int *raw_rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);

		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
			netif_err(efx, hw, efx->net_dev,
				  "MCDI request was completed without an event\n");
			rc = 0;
		}

		efx_mcdi_abandon(efx);

		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);
	}

	if (proxy_handle)
		*proxy_handle = 0;

	if (rc != 0) {
		if (outlen_actual)
			*outlen_actual = 0;
	} else {
		size_t hdr_len, data_len, err_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		if (raw_rc)
			*raw_rc = mcdi->resprc_raw;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		err_len = min(sizeof(errbuf), data_len);
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
					      min(outlen, data_len));
		if (outlen_actual)
			*outlen_actual = data_len;

		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);

		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			/* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else if (proxy_handle && (rc == -EPROTO) &&
			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
						     proxy_handle)) {
			mcdi->proxy_rx_status = 0;
			mcdi->proxy_rx_handle = 0;
			mcdi->state = MCDI_STATE_PROXY_WAIT;
		} else if (rc && !quiet) {
			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
					       rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	if (!proxy_handle || !*proxy_handle)
		efx_mcdi_release(mcdi);
	return rc;
}

static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
		/* Interrupt the proxy wait. */
		mcdi->proxy_rx_status = -EINTR;
		wake_up(&mcdi->proxy_rx_wq);
	}
}

static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
				       u32 handle, int status)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);

	mcdi->proxy_rx_status = efx_mcdi_errno(status);
	/* Ensure the status is written before we update the handle, since the
	 * latter is used to check if we've finished.
	 */
	wmb();
	mcdi->proxy_rx_handle = handle;
	wake_up(&mcdi->proxy_rx_wq);
}

static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	/* Wait for a proxy event, or timeout. */
	rc = wait_event_timeout(mcdi->proxy_rx_wq,
				mcdi->proxy_rx_handle != 0 ||
				mcdi->proxy_rx_status == -EINTR,
				MCDI_RPC_TIMEOUT);

	if (rc <= 0) {
		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI proxy timeout %d\n", handle);
		return -ETIMEDOUT;
	} else if (mcdi->proxy_rx_handle != handle) {
		netif_warn(efx, hw, efx->net_dev,
			   "MCDI proxy unexpected handle %d (expected %d)\n",
			   mcdi->proxy_rx_handle, handle);
		return -EINVAL;
	}

	return mcdi->proxy_rx_status;
}

static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
			 const efx_dword_t *inbuf, size_t inlen,
			 efx_dword_t *outbuf, size_t outlen,
			 size_t *outlen_actual, bool quiet, int *raw_rc)
{
	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
	int rc;

	if (inbuf && inlen && (inbuf == outbuf)) {
		/* The input buffer can't be aliased with the output. */
		WARN_ON(1);
		return -EINVAL;
	}

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				  outlen_actual, quiet, &proxy_handle, raw_rc);

	if (proxy_handle) {
		/* Handle proxy authorisation.  This allows approval of MCDI
		 * operations to be delegated to the admin function, allowing
		 * fine control over (eg) multicast subscriptions.
		 */
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI waiting for proxy auth %d\n",
			  proxy_handle);
		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);

		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "MCDI proxy retry %d\n", proxy_handle);

			/* We now retry the original request. */
			mcdi->state = MCDI_STATE_RUNNING_SYNC;
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);

			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
						  outbuf, outlen, outlen_actual,
						  quiet, NULL, raw_rc);
		} else {
			netif_printk(efx, hw,
				     rc == -EPERM ? KERN_DEBUG : KERN_ERR,
				     efx->net_dev,
				     "MC command 0x%x failed after proxy auth rc=%d\n",
				     cmd, rc);

			if (rc == -EINTR || rc == -EIO)
				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
			efx_mcdi_release(mcdi);
		}
	}

	return rc;
}

static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
				   const efx_dword_t *inbuf, size_t inlen,
				   efx_dword_t *outbuf, size_t outlen,
				   size_t *outlen_actual, bool quiet)
{
	int raw_rc = 0;
	int rc;

	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
			   outbuf, outlen, outlen_actual, true, &raw_rc);

	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
	    efx->type->is_vf) {
		/* If the EVB port isn't available within a VF this may
		 * mean the PF is still bringing the switch up. We should
		 * retry our request shortly.
		 */
		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
		unsigned int delay_us = 10000;

		netif_dbg(efx, hw, efx->net_dev,
			  "%s: NO_EVB_PORT; will retry request\n",
			  __func__);

		do {
			usleep_range(delay_us, delay_us + 10000);
			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
					   outbuf, outlen, outlen_actual,
					   true, &raw_rc);
			if (delay_us < 100000)
				delay_us <<= 1;
		} while ((rc == -EPROTO) &&
			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
			 time_before(jiffies, abort_time));
	}

	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
		efx_mcdi_display_error(efx, cmd, inlen,
				       outbuf, outlen, rc);

	return rc;
}
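
/* Worked example of the backoff above: delay_us starts at 10ms and doubles
 * after each NO_EVB_PORT failure (10, 20, 40, 80, then 160ms, after which
 * it stops doubling), so a VF retries roughly every 160ms until the PF
 * brings the switch up or MCDI_RPC_TIMEOUT (10s) elapses.
 */
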
/**
 * efx_mcdi_rpc - Issue an MCDI command and wait for completion
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 * @outlen: Length of response buffer, in bytes.  If the actual
 *	response is longer than @outlen & ~3, it will be truncated
 *	to that length.
 * @outlen_actual: Pointer through which to return the actual response
 *	length.  May be %NULL if this is not needed.
 *
 * This function may sleep and therefore must be called in an appropriate
 * context.
 *
 * Return: A negative error code, or zero if successful.  The error
 *	code may come from the MCDI response or may indicate a failure
 *	to communicate with the MC.  In the former case, the response
 *	will still be copied to @outbuf and *@outlen_actual will be
 *	set accordingly.  In the latter case, *@outlen_actual will be
 *	set to zero.
 */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, false);
}
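
/* Minimal usage sketch for efx_mcdi_rpc() (illustrative only; compare
 * efx_mcdi_print_fwver() below, which issues MC_CMD_GET_VERSION this way):
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
 *	size_t outlen;
 *	int rc;
 *
 *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
 *			  outbuf, sizeof(outbuf), &outlen);
 *	if (rc == 0 && outlen >= MC_CMD_GET_VERSION_OUT_LEN)
 *		... parse outbuf with MCDI_PTR()/MCDI_DWORD() ...
 */
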
/* Normally, on receiving an error code in the MCDI response,
 * efx_mcdi_rpc will log an error message containing (among other
 * things) the raw error code, by means of efx_mcdi_display_error.
 * This _quiet version suppresses that; if the caller wishes to log
 * the error conditionally on the return code, it should call this
 * function and is then responsible for calling efx_mcdi_display_error
 * as needed.
 */
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, true);
}
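
/* Sketch of the conditional-logging pattern described above (illustrative;
 * here -ENOENT stands in for an error the caller expects and wants to
 * suppress):
 *
 *	rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen,
 *				outbuf, sizeof(outbuf), &outlen);
 *	if (rc && rc != -ENOENT)
 *		efx_mcdi_display_error(efx, cmd, inlen, outbuf, outlen, rc);
 */
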
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	if (mcdi->mode == MCDI_MODE_FAIL)
		return -ENETDOWN;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			       const efx_dword_t *inbuf, size_t inlen,
			       size_t outlen,
			       efx_mcdi_async_completer *complete,
			       unsigned long cookie, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->quiet = quiet;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}
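
/* Usage sketch (illustrative; my_complete and MY_CMD_OUT_LEN are made-up
 * names).  The completer matches efx_mcdi_async_completer in mcdi.h and
 * runs in atomic context, so it must not sleep:
 *
 *	static void my_complete(struct efx_nic *efx, unsigned long cookie,
 *				int rc, efx_dword_t *outbuf,
 *				size_t outlen_actual)
 *	{
 *		... handle rc/outbuf, e.g. schedule follow-up work ...
 *	}
 *
 *	rc = efx_mcdi_rpc_async(efx, cmd, inbuf, inlen,
 *				MY_CMD_OUT_LEN, my_complete, 0);
 */
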
int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     size_t outlen, efx_mcdi_async_completer *complete,
			     unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, true);
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, false, NULL, NULL);
}

int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
			      efx_dword_t *outbuf, size_t outlen,
			      size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, true, NULL, NULL);
}
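
/* The start/finish split above lets a caller overlap other work with the MC
 * processing the request (sketch; the same cmd/inlen must be passed to both
 * halves):
 *
 *	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
 *	if (rc)
 *		return rc;
 *	... do other work while the MC processes the request ...
 *	rc = efx_mcdi_rpc_finish(efx, cmd, inlen,
 *				 outbuf, sizeof(outbuf), &outlen);
 */
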
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
			    size_t inlen, efx_dword_t *outbuf,
			    size_t outlen, int rc)
{
	int code = 0, err_arg = 0;

	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
		code = MCDI_DWORD(outbuf, ERR_CODE);
	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
	netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
		     efx->net_dev,
		     "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
		     cmd, inlen, rc, code, err_arg);
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		if (async->complete)
			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in event completion mode, nothing to do.
	 * If in fail-fast state, don't switch to event completion.  FLR
	 * recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can do always is just return failure.
	 *
	 * If there is an outstanding proxy response expected it is not going
	 * to arrive. We should thus abort it.
	 */
	spin_lock(&mcdi->iface_lock);
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			rc = efx_mcdi_poll_reboot(efx);
			if (rc)
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}

		/* On EF10, a CODE_MC_REBOOT event can be received without the
		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
		 * If zero was returned from the final call to
		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
		 * MC has definitely rebooted so prepare for the reset.
		 */
		if (!rc && efx->type->mcdi_reboot_detected)
			efx->type->mcdi_reboot_detected(efx);

		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* The MC is going down into BIST mode.  Set the BIST flag to block
 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
 * (which doesn't actually execute a reset, it waits for the controlling
 * function to reset it).
 */
static void efx_mcdi_ev_bist(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	spin_lock(&mcdi->iface_lock);
	efx->mc_bist_for_other_fn = true;
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = -EIO;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	}
	mcdi->new_epoch = true;
	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
	spin_unlock(&mcdi->iface_lock);
}

/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		if (efx->type->sriov_flr)
			efx->type->sriov_flr(efx,
					     MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_PTP_TIME:
		efx_time_sync_event(channel, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_ef10_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		efx_mcdi_ev_proxy_response(efx,
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

"TX" : "RX", 1377 EFX_QWORD_VAL(*event)); 1378 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 1379 break; 1380 case MCDI_EVENT_CODE_PROXY_RESPONSE: 1381 efx_mcdi_ev_proxy_response(efx, 1382 MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), 1383 MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); 1384 break; 1385 default: 1386 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 1387 code); 1388 } 1389 } 1390 1391 /************************************************************************** 1392 * 1393 * Specific request functions 1394 * 1395 ************************************************************************** 1396 */ 1397 1398 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 1399 { 1400 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); 1401 size_t outlength; 1402 const __le16 *ver_words; 1403 size_t offset; 1404 int rc; 1405 1406 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 1407 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 1408 outbuf, sizeof(outbuf), &outlength); 1409 if (rc) 1410 goto fail; 1411 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 1412 rc = -EIO; 1413 goto fail; 1414 } 1415 1416 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 1417 offset = snprintf(buf, len, "%u.%u.%u.%u", 1418 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 1419 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 1420 1421 /* EF10 may have multiple datapath firmware variants within a 1422 * single version. Report which variants are running. 1423 */ 1424 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { 1425 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1426 1427 offset += snprintf(buf + offset, len - offset, " rx%x tx%x", 1428 nic_data->rx_dpcpu_fw_id, 1429 nic_data->tx_dpcpu_fw_id); 1430 1431 /* It's theoretically possible for the string to exceed 31 1432 * characters, though in practice the first three version 1433 * components are short enough that this doesn't happen. 1434 */ 1435 if (WARN_ON(offset >= len)) 1436 buf[0] = 0; 1437 } 1438 1439 return; 1440 1441 fail: 1442 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1443 buf[0] = 0; 1444 } 1445 1446 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 1447 bool *was_attached) 1448 { 1449 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 1450 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); 1451 size_t outlen; 1452 int rc; 1453 1454 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 1455 driver_operating ? 1 : 0); 1456 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 1457 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); 1458 1459 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 1460 outbuf, sizeof(outbuf), &outlen); 1461 /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID 1462 * specified will fail with EPERM, and we have to tell the MC we don't 1463 * care what firmware we get. 
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
	 * specified will fail with EPERM, and we have to tell the MC we don't
	 * care what firmware we get.
	 */
	if (rc == -EPERM) {
		netif_dbg(efx, probe, efx->net_dev,
			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
			       MC_CMD_FW_DONT_CARE);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
					sizeof(inbuf), outbuf, sizeof(outbuf),
					&outlen);
	}
	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
				       outbuf, outlen, rc);
		goto fail;
	}
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (driver_operating) {
		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
			efx->mcdi->fn_flags =
				MCDI_DWORD(outbuf,
					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
		} else {
			/* Synthesise flags for Siena */
			efx->mcdi->fn_flags =
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
				(efx_port_num(efx) == 0) <<
				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
		}
	}

	/* We currently assume we have control of the external link
	 * and are completely trusted by firmware.  Abort probing
	 * if that's not true for this function.
	 */

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
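
/* The fn_flags word captured above is consumed with simple bit tests, as in
 * efx_mcdi_init() earlier in this file:
 *
 *	if (efx->mcdi->fn_flags &
 *	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
 *		efx->primary = efx;
 */
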
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
	/* we need __aligned(2) for ether_addr_copy */
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		ether_addr_copy(mac_address,
				port_num ?
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Returns 1 if an assertion was read, 0 if no assertion had fired,
 * negative on error.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
		if (rc == -EPERM)
			return 0;
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 1;
}

static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.
	 * The MCDI will thus return either 0 or -EIO.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
				NULL, 0, NULL);
	if (rc == -EIO)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
				       NULL, 0, rc);
	return rc;
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc <= 0)
		return rc;

	return efx_mcdi_exit_assertion(efx);
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	(void) efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

static int efx_mcdi_reset_func(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* If MCDI is down, we can't handle_assertion */
	if (method == RESET_TYPE_MCDI_TIMEOUT) {
		rc = pci_reset_function(efx->pci_dev);
		if (rc)
			return rc;
		/* Re-enable polled MCDI completion */
		if (efx->mcdi) {
			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

			mcdi->mode = MCDI_MODE_POLL;
		}
		return 0;
	}

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_DATAPATH)
		return 0;
	else if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_func(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	return rc;
}

int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
			    unsigned int *flags)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	if (!flags)
		return 0;

	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
	else
		*flags = 0;

	return 0;
}
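
/* A hedged sketch of how the two workaround calls combine (illustrative
 * only; it assumes the BUG35388 definitions from mcdi_pcol.h, which not
 * every firmware build implements):
 *
 *	unsigned int implemented, enabled;
 *
 *	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
 *	if (!rc && (implemented & ~enabled &
 *		    MC_CMD_GET_WORKAROUNDS_OUT_BUG35388))
 *		rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388,
 *					     true, NULL);
 *
 * -ENOSYS from efx_mcdi_get_workarounds() just means the firmware
 * predates GET_WORKAROUNDS, not that anything is broken.
 */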

int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
			     unsigned int *enabled_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (impl_out)
		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);

	if (enabled_out)
		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);

	return 0;

fail:
	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
	 * terrifying.  The call site will have to deal with it though.
	 */
	netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
		     efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* The request length must be a whole number of dwords, so round
	 * the variable-length payload up.
	 */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}
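
/* Every NVRAM modification must be bracketed by UPDATE_START and
 * UPDATE_FINISH.  The MTD callbacks below take care of this: the first
 * erase or write on a partition calls efx_mcdi_nvram_update_start() and
 * sets part->updating, and efx_mcdi_mtd_sync() later sends
 * UPDATE_FINISH, at which point the MC may verify and commit the new
 * contents.
 */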

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout.
	 */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */