1 /* 2 * Copyright (C) 2005 - 2014 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Contact Information: 11 * linux-drivers@emulex.com 12 * 13 * Emulex 14 * 3333 Susan Street 15 * Costa Mesa, CA 92626 16 */ 17 18 #include <linux/module.h> 19 #include "be.h" 20 #include "be_cmds.h" 21 22 static struct be_cmd_priv_map cmd_priv_map[] = { 23 { 24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 25 CMD_SUBSYSTEM_ETH, 26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 28 }, 29 { 30 OPCODE_COMMON_GET_FLOW_CONTROL, 31 CMD_SUBSYSTEM_COMMON, 32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM | 33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 34 }, 35 { 36 OPCODE_COMMON_SET_FLOW_CONTROL, 37 CMD_SUBSYSTEM_COMMON, 38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 40 }, 41 { 42 OPCODE_ETH_GET_PPORT_STATS, 43 CMD_SUBSYSTEM_ETH, 44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 46 }, 47 { 48 OPCODE_COMMON_GET_PHY_DETAILS, 49 CMD_SUBSYSTEM_COMMON, 50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 52 } 53 }; 54 55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) 56 { 57 int i; 58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); 59 u32 cmd_privileges = adapter->cmd_privileges; 60 61 for (i = 0; i < num_entries; i++) 62 if (opcode == cmd_priv_map[i].opcode && 63 subsystem == cmd_priv_map[i].subsystem) 64 if (!(cmd_privileges & cmd_priv_map[i].priv_mask)) 65 return false; 66 67 return true; 68 } 69 70 static inline void *embedded_payload(struct be_mcc_wrb *wrb) 71 { 72 return wrb->payload.embedded_payload; 73 } 74 75 static void be_mcc_notify(struct be_adapter *adapter) 76 { 77 struct be_queue_info *mccq = &adapter->mcc_obj.q; 78 u32 val = 0; 79 80 if (be_error(adapter)) 81 return; 82 83 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 84 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 85 86 wmb(); 87 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 88 } 89 90 /* To check if valid bit is set, check the entire word as we don't know 91 * the endianness of the data (old entry is host endian while a new entry is 92 * little endian) */ 93 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 94 { 95 u32 flags; 96 97 if (compl->flags != 0) { 98 flags = le32_to_cpu(compl->flags); 99 if (flags & CQE_FLAGS_VALID_MASK) { 100 compl->flags = flags; 101 return true; 102 } 103 } 104 return false; 105 } 106 107 /* Need to reset the entire word that houses the valid bit */ 108 static inline void be_mcc_compl_use(struct be_mcc_compl *compl) 109 { 110 compl->flags = 0; 111 } 112 113 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) 114 { 115 unsigned long addr; 116 117 addr = tag1; 118 addr = ((addr << 16) << 16) | tag0; 119 return (void *)addr; 120 } 121 122 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status) 123 { 124 if (base_status == MCC_STATUS_NOT_SUPPORTED || 125 base_status == MCC_STATUS_ILLEGAL_REQUEST || 126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES || 127 (opcode == OPCODE_COMMON_WRITE_FLASHROM && 128 (base_status == MCC_STATUS_ILLEGAL_FIELD || 129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH))) 130 return true; 131 else 132 return false; 133 } 134 135 /* Place holder for all the async MCC cmds wherein the caller is not in a busy 136 * loop (has not issued be_mcc_notify_wait()) 137 */ 138 static void be_async_cmd_process(struct be_adapter *adapter, 139 struct be_mcc_compl *compl, 140 struct be_cmd_resp_hdr *resp_hdr) 141 { 142 enum mcc_base_status base_status = base_status(compl->status); 143 u8 opcode = 0, subsystem = 0; 144 145 if (resp_hdr) { 146 opcode = resp_hdr->opcode; 147 subsystem = resp_hdr->subsystem; 148 } 149 150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && 151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 152 complete(&adapter->et_cmd_compl); 153 return; 154 } 155 156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || 157 opcode == OPCODE_COMMON_WRITE_OBJECT) && 158 subsystem == CMD_SUBSYSTEM_COMMON) { 159 adapter->flash_status = compl->status; 160 complete(&adapter->et_cmd_compl); 161 return; 162 } 163 164 if ((opcode == OPCODE_ETH_GET_STATISTICS || 165 opcode == OPCODE_ETH_GET_PPORT_STATS) && 166 subsystem == CMD_SUBSYSTEM_ETH && 167 base_status == MCC_STATUS_SUCCESS) { 168 be_parse_stats(adapter); 169 adapter->stats_cmd_sent = false; 170 return; 171 } 172 173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 174 subsystem == CMD_SUBSYSTEM_COMMON) { 175 if (base_status == MCC_STATUS_SUCCESS) { 176 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 177 (void *)resp_hdr; 178 adapter->drv_stats.be_on_die_temperature = 179 resp->on_die_temperature; 180 } else { 181 adapter->be_get_temp_freq = 0; 182 } 183 return; 184 } 185 } 186 187 static int be_mcc_compl_process(struct be_adapter *adapter, 188 struct be_mcc_compl *compl) 189 { 190 enum mcc_base_status base_status; 191 enum mcc_addl_status addl_status; 192 struct be_cmd_resp_hdr *resp_hdr; 193 u8 opcode = 0, subsystem = 0; 194 195 /* Just swap the status to host endian; mcc tag is opaquely copied 196 * from mcc_wrb */ 197 be_dws_le_to_cpu(compl, 4); 198 199 base_status = base_status(compl->status); 200 addl_status = addl_status(compl->status); 201 202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 203 if (resp_hdr) { 204 opcode = resp_hdr->opcode; 205 subsystem = resp_hdr->subsystem; 206 } 207 208 be_async_cmd_process(adapter, compl, resp_hdr); 209 210 if (base_status != MCC_STATUS_SUCCESS && 211 !be_skip_err_log(opcode, base_status, addl_status)) { 212 213 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 214 dev_warn(&adapter->pdev->dev, 215 "VF is not privileged to issue opcode %d-%d\n", 216 opcode, subsystem); 217 } else { 218 dev_err(&adapter->pdev->dev, 219 "opcode %d-%d failed:status %d-%d\n", 220 opcode, subsystem, base_status, addl_status); 221 } 222 } 223 return compl->status; 224 } 225 226 /* Link state evt is a string of bytes; no need for endian swapping */ 227 static void be_async_link_state_process(struct be_adapter *adapter, 228 struct be_mcc_compl *compl) 229 { 230 struct be_async_event_link_state *evt = 231 (struct be_async_event_link_state *)compl; 232 233 /* When link status changes, link speed must be re-queried from FW */ 234 adapter->phy.link_speed = -1; 235 236 /* On BEx the FW does not send a separate link status 237 * notification for physical and logical link. 238 * On other chips just process the logical link 239 * status notification 240 */ 241 if (!BEx_chip(adapter) && 242 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) 243 return; 244 245 /* For the initial link status do not rely on the ASYNC event as 246 * it may not be received in some cases. 247 */ 248 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 249 be_link_status_update(adapter, 250 evt->port_link_status & LINK_STATUS_MASK); 251 } 252 253 /* Grp5 CoS Priority evt */ 254 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 255 struct be_mcc_compl *compl) 256 { 257 struct be_async_event_grp5_cos_priority *evt = 258 (struct be_async_event_grp5_cos_priority *)compl; 259 260 if (evt->valid) { 261 adapter->vlan_prio_bmap = evt->available_priority_bmap; 262 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 263 adapter->recommended_prio = 264 evt->reco_default_priority << VLAN_PRIO_SHIFT; 265 } 266 } 267 268 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 269 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 270 struct be_mcc_compl *compl) 271 { 272 struct be_async_event_grp5_qos_link_speed *evt = 273 (struct be_async_event_grp5_qos_link_speed *)compl; 274 275 if (adapter->phy.link_speed >= 0 && 276 evt->physical_port == adapter->port_num) 277 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; 278 } 279 280 /*Grp5 PVID evt*/ 281 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 282 struct be_mcc_compl *compl) 283 { 284 struct be_async_event_grp5_pvid_state *evt = 285 (struct be_async_event_grp5_pvid_state *)compl; 286 287 if (evt->enabled) { 288 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 289 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); 290 } else { 291 adapter->pvid = 0; 292 } 293 } 294 295 static void be_async_grp5_evt_process(struct be_adapter *adapter, 296 struct be_mcc_compl *compl) 297 { 298 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) & 299 ASYNC_EVENT_TYPE_MASK; 300 301 switch (event_type) { 302 case ASYNC_EVENT_COS_PRIORITY: 303 be_async_grp5_cos_priority_process(adapter, compl); 304 break; 305 case ASYNC_EVENT_QOS_SPEED: 306 be_async_grp5_qos_speed_process(adapter, compl); 307 break; 308 case ASYNC_EVENT_PVID_STATE: 309 be_async_grp5_pvid_state_process(adapter, compl); 310 break; 311 default: 312 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", 313 event_type); 314 break; 315 } 316 } 317 318 static void be_async_dbg_evt_process(struct be_adapter *adapter, 319 struct be_mcc_compl *cmp) 320 { 321 u8 event_type = 0; 322 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; 323 324 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & 325 ASYNC_EVENT_TYPE_MASK; 326 327 switch (event_type) { 328 case ASYNC_DEBUG_EVENT_TYPE_QNQ: 329 if (evt->valid) 330 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag); 331 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; 332 break; 333 default: 334 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n", 335 event_type); 336 break; 337 } 338 } 339 340 static inline bool is_link_state_evt(u32 flags) 341 { 342 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 343 ASYNC_EVENT_CODE_LINK_STATE; 344 } 345 346 static inline bool is_grp5_evt(u32 flags) 347 { 348 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 349 ASYNC_EVENT_CODE_GRP_5; 350 } 351 352 static inline bool is_dbg_evt(u32 flags) 353 { 354 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 355 ASYNC_EVENT_CODE_QNQ; 356 } 357 358 static void be_mcc_event_process(struct be_adapter *adapter, 359 struct be_mcc_compl *compl) 360 { 361 if (is_link_state_evt(compl->flags)) 362 be_async_link_state_process(adapter, compl); 363 else if (is_grp5_evt(compl->flags)) 364 be_async_grp5_evt_process(adapter, compl); 365 else if (is_dbg_evt(compl->flags)) 366 be_async_dbg_evt_process(adapter, compl); 367 } 368 369 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 370 { 371 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 372 struct be_mcc_compl *compl = queue_tail_node(mcc_cq); 373 374 if (be_mcc_compl_is_new(compl)) { 375 queue_tail_inc(mcc_cq); 376 return compl; 377 } 378 return NULL; 379 } 380 381 void be_async_mcc_enable(struct be_adapter *adapter) 382 { 383 spin_lock_bh(&adapter->mcc_cq_lock); 384 385 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); 386 adapter->mcc_obj.rearm_cq = true; 387 388 spin_unlock_bh(&adapter->mcc_cq_lock); 389 } 390 391 void be_async_mcc_disable(struct be_adapter *adapter) 392 { 393 spin_lock_bh(&adapter->mcc_cq_lock); 394 395 adapter->mcc_obj.rearm_cq = false; 396 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); 397 398 spin_unlock_bh(&adapter->mcc_cq_lock); 399 } 400 401 int be_process_mcc(struct be_adapter *adapter) 402 { 403 struct be_mcc_compl *compl; 404 int num = 0, status = 0; 405 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 406 407 spin_lock(&adapter->mcc_cq_lock); 408 409 while ((compl = be_mcc_compl_get(adapter))) { 410 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 411 be_mcc_event_process(adapter, compl); 412 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 413 status = be_mcc_compl_process(adapter, compl); 414 atomic_dec(&mcc_obj->q.used); 415 } 416 be_mcc_compl_use(compl); 417 num++; 418 } 419 420 if (num) 421 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 422 423 spin_unlock(&adapter->mcc_cq_lock); 424 return status; 425 } 426 427 /* Wait till no more pending mcc requests are present */ 428 static int be_mcc_wait_compl(struct be_adapter *adapter) 429 { 430 #define mcc_timeout 120000 /* 12s timeout */ 431 int i, status = 0; 432 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 433 434 for (i = 0; i < mcc_timeout; i++) { 435 if (be_error(adapter)) 436 return -EIO; 437 438 local_bh_disable(); 439 status = be_process_mcc(adapter); 440 local_bh_enable(); 441 442 if (atomic_read(&mcc_obj->q.used) == 0) 443 break; 444 udelay(100); 445 } 446 if (i == mcc_timeout) { 447 dev_err(&adapter->pdev->dev, "FW not responding\n"); 448 adapter->fw_timeout = true; 449 return -EIO; 450 } 451 return status; 452 } 453 454 /* Notify MCC requests and wait for completion */ 455 static int be_mcc_notify_wait(struct be_adapter *adapter) 456 { 457 int status; 458 struct be_mcc_wrb *wrb; 459 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 460 u16 index = mcc_obj->q.head; 461 struct be_cmd_resp_hdr *resp; 462 463 index_dec(&index, mcc_obj->q.len); 464 wrb = queue_index_node(&mcc_obj->q, index); 465 466 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); 467 468 be_mcc_notify(adapter); 469 470 status = be_mcc_wait_compl(adapter); 471 if (status == -EIO) 472 goto out; 473 474 status = (resp->base_status | 475 ((resp->addl_status & CQE_ADDL_STATUS_MASK) << 476 CQE_ADDL_STATUS_SHIFT)); 477 out: 478 return status; 479 } 480 481 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 482 { 483 int msecs = 0; 484 u32 ready; 485 486 do { 487 if (be_error(adapter)) 488 return -EIO; 489 490 ready = ioread32(db); 491 if (ready == 0xffffffff) 492 return -1; 493 494 ready &= MPU_MAILBOX_DB_RDY_MASK; 495 if (ready) 496 break; 497 498 if (msecs > 4000) { 499 dev_err(&adapter->pdev->dev, "FW not responding\n"); 500 adapter->fw_timeout = true; 501 be_detect_error(adapter); 502 return -1; 503 } 504 505 msleep(1); 506 msecs++; 507 } while (true); 508 509 return 0; 510 } 511 512 /* 513 * Insert the mailbox address into the doorbell in two steps 514 * Polls on the mbox doorbell till a command completion (or a timeout) occurs 515 */ 516 static int be_mbox_notify_wait(struct be_adapter *adapter) 517 { 518 int status; 519 u32 val = 0; 520 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; 521 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 522 struct be_mcc_mailbox *mbox = mbox_mem->va; 523 struct be_mcc_compl *compl = &mbox->compl; 524 525 /* wait for ready to be set */ 526 status = be_mbox_db_ready_wait(adapter, db); 527 if (status != 0) 528 return status; 529 530 val |= MPU_MAILBOX_DB_HI_MASK; 531 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 532 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 533 iowrite32(val, db); 534 535 /* wait for ready to be set */ 536 status = be_mbox_db_ready_wait(adapter, db); 537 if (status != 0) 538 return status; 539 540 val = 0; 541 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ 542 val |= (u32)(mbox_mem->dma >> 4) << 2; 543 iowrite32(val, db); 544 545 status = be_mbox_db_ready_wait(adapter, db); 546 if (status != 0) 547 return status; 548 549 /* A cq entry has been made now */ 550 if (be_mcc_compl_is_new(compl)) { 551 status = be_mcc_compl_process(adapter, &mbox->compl); 552 be_mcc_compl_use(compl); 553 if (status) 554 return status; 555 } else { 556 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); 557 return -1; 558 } 559 return 0; 560 } 561 562 static u16 be_POST_stage_get(struct be_adapter *adapter) 563 { 564 u32 sem; 565 566 if (BEx_chip(adapter)) 567 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); 568 else 569 pci_read_config_dword(adapter->pdev, 570 SLIPORT_SEMAPHORE_OFFSET_SH, &sem); 571 572 return sem & POST_STAGE_MASK; 573 } 574 575 static int lancer_wait_ready(struct be_adapter *adapter) 576 { 577 #define SLIPORT_READY_TIMEOUT 30 578 u32 sliport_status; 579 int status = 0, i; 580 581 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { 582 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 583 if (sliport_status & SLIPORT_STATUS_RDY_MASK) 584 break; 585 586 msleep(1000); 587 } 588 589 if (i == SLIPORT_READY_TIMEOUT) 590 status = -1; 591 592 return status; 593 } 594 595 static bool lancer_provisioning_error(struct be_adapter *adapter) 596 { 597 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 598 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 599 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 600 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); 601 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); 602 603 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && 604 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) 605 return true; 606 } 607 return false; 608 } 609 610 int lancer_test_and_set_rdy_state(struct be_adapter *adapter) 611 { 612 int status; 613 u32 sliport_status, err, reset_needed; 614 bool resource_error; 615 616 resource_error = lancer_provisioning_error(adapter); 617 if (resource_error) 618 return -EAGAIN; 619 620 status = lancer_wait_ready(adapter); 621 if (!status) { 622 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 623 err = sliport_status & SLIPORT_STATUS_ERR_MASK; 624 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; 625 if (err && reset_needed) { 626 iowrite32(SLI_PORT_CONTROL_IP_MASK, 627 adapter->db + SLIPORT_CONTROL_OFFSET); 628 629 /* check adapter has corrected the error */ 630 status = lancer_wait_ready(adapter); 631 sliport_status = ioread32(adapter->db + 632 SLIPORT_STATUS_OFFSET); 633 sliport_status &= (SLIPORT_STATUS_ERR_MASK | 634 SLIPORT_STATUS_RN_MASK); 635 if (status || sliport_status) 636 status = -1; 637 } else if (err || reset_needed) { 638 status = -1; 639 } 640 } 641 /* Stop error recovery if error is not recoverable. 642 * No resource error is temporary errors and will go away 643 * when PF provisions resources. 644 */ 645 resource_error = lancer_provisioning_error(adapter); 646 if (resource_error) 647 status = -EAGAIN; 648 649 return status; 650 } 651 652 int be_fw_wait_ready(struct be_adapter *adapter) 653 { 654 u16 stage; 655 int status, timeout = 0; 656 struct device *dev = &adapter->pdev->dev; 657 658 if (lancer_chip(adapter)) { 659 status = lancer_wait_ready(adapter); 660 return status; 661 } 662 663 do { 664 stage = be_POST_stage_get(adapter); 665 if (stage == POST_STAGE_ARMFW_RDY) 666 return 0; 667 668 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout); 669 if (msleep_interruptible(2000)) { 670 dev_err(dev, "Waiting for POST aborted\n"); 671 return -EINTR; 672 } 673 timeout += 2; 674 } while (timeout < 60); 675 676 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 677 return -1; 678 } 679 680 681 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) 682 { 683 return &wrb->payload.sgl[0]; 684 } 685 686 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr) 687 { 688 wrb->tag0 = addr & 0xFFFFFFFF; 689 wrb->tag1 = upper_32_bits(addr); 690 } 691 692 /* Don't touch the hdr after it's prepared */ 693 /* mem will be NULL for embedded commands */ 694 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 695 u8 subsystem, u8 opcode, int cmd_len, 696 struct be_mcc_wrb *wrb, 697 struct be_dma_mem *mem) 698 { 699 struct be_sge *sge; 700 701 req_hdr->opcode = opcode; 702 req_hdr->subsystem = subsystem; 703 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 704 req_hdr->version = 0; 705 fill_wrb_tags(wrb, (ulong) req_hdr); 706 wrb->payload_length = cmd_len; 707 if (mem) { 708 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 709 MCC_WRB_SGE_CNT_SHIFT; 710 sge = nonembedded_sgl(wrb); 711 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); 712 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); 713 sge->len = cpu_to_le32(mem->size); 714 } else 715 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 716 be_dws_cpu_to_le(wrb, 8); 717 } 718 719 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 720 struct be_dma_mem *mem) 721 { 722 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 723 u64 dma = (u64)mem->dma; 724 725 for (i = 0; i < buf_pages; i++) { 726 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); 727 pages[i].hi = cpu_to_le32(upper_32_bits(dma)); 728 dma += PAGE_SIZE_4K; 729 } 730 } 731 732 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 733 { 734 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 735 struct be_mcc_wrb *wrb 736 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 737 memset(wrb, 0, sizeof(*wrb)); 738 return wrb; 739 } 740 741 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) 742 { 743 struct be_queue_info *mccq = &adapter->mcc_obj.q; 744 struct be_mcc_wrb *wrb; 745 746 if (!mccq->created) 747 return NULL; 748 749 if (atomic_read(&mccq->used) >= mccq->len) 750 return NULL; 751 752 wrb = queue_head_node(mccq); 753 queue_head_inc(mccq); 754 atomic_inc(&mccq->used); 755 memset(wrb, 0, sizeof(*wrb)); 756 return wrb; 757 } 758 759 static bool use_mcc(struct be_adapter *adapter) 760 { 761 return adapter->mcc_obj.q.created; 762 } 763 764 /* Must be used only in process context */ 765 static int be_cmd_lock(struct be_adapter *adapter) 766 { 767 if (use_mcc(adapter)) { 768 spin_lock_bh(&adapter->mcc_lock); 769 return 0; 770 } else { 771 return mutex_lock_interruptible(&adapter->mbox_lock); 772 } 773 } 774 775 /* Must be used only in process context */ 776 static void be_cmd_unlock(struct be_adapter *adapter) 777 { 778 if (use_mcc(adapter)) 779 spin_unlock_bh(&adapter->mcc_lock); 780 else 781 return mutex_unlock(&adapter->mbox_lock); 782 } 783 784 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter, 785 struct be_mcc_wrb *wrb) 786 { 787 struct be_mcc_wrb *dest_wrb; 788 789 if (use_mcc(adapter)) { 790 dest_wrb = wrb_from_mccq(adapter); 791 if (!dest_wrb) 792 return NULL; 793 } else { 794 dest_wrb = wrb_from_mbox(adapter); 795 } 796 797 memcpy(dest_wrb, wrb, sizeof(*wrb)); 798 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK)) 799 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb)); 800 801 return dest_wrb; 802 } 803 804 /* Must be used only in process context */ 805 static int be_cmd_notify_wait(struct be_adapter *adapter, 806 struct be_mcc_wrb *wrb) 807 { 808 struct be_mcc_wrb *dest_wrb; 809 int status; 810 811 status = be_cmd_lock(adapter); 812 if (status) 813 return status; 814 815 dest_wrb = be_cmd_copy(adapter, wrb); 816 if (!dest_wrb) 817 return -EBUSY; 818 819 if (use_mcc(adapter)) 820 status = be_mcc_notify_wait(adapter); 821 else 822 status = be_mbox_notify_wait(adapter); 823 824 if (!status) 825 memcpy(wrb, dest_wrb, sizeof(*wrb)); 826 827 be_cmd_unlock(adapter); 828 return status; 829 } 830 831 /* Tell fw we're about to start firing cmds by writing a 832 * special pattern across the wrb hdr; uses mbox 833 */ 834 int be_cmd_fw_init(struct be_adapter *adapter) 835 { 836 u8 *wrb; 837 int status; 838 839 if (lancer_chip(adapter)) 840 return 0; 841 842 if (mutex_lock_interruptible(&adapter->mbox_lock)) 843 return -1; 844 845 wrb = (u8 *)wrb_from_mbox(adapter); 846 *wrb++ = 0xFF; 847 *wrb++ = 0x12; 848 *wrb++ = 0x34; 849 *wrb++ = 0xFF; 850 *wrb++ = 0xFF; 851 *wrb++ = 0x56; 852 *wrb++ = 0x78; 853 *wrb = 0xFF; 854 855 status = be_mbox_notify_wait(adapter); 856 857 mutex_unlock(&adapter->mbox_lock); 858 return status; 859 } 860 861 /* Tell fw we're done with firing cmds by writing a 862 * special pattern across the wrb hdr; uses mbox 863 */ 864 int be_cmd_fw_clean(struct be_adapter *adapter) 865 { 866 u8 *wrb; 867 int status; 868 869 if (lancer_chip(adapter)) 870 return 0; 871 872 if (mutex_lock_interruptible(&adapter->mbox_lock)) 873 return -1; 874 875 wrb = (u8 *)wrb_from_mbox(adapter); 876 *wrb++ = 0xFF; 877 *wrb++ = 0xAA; 878 *wrb++ = 0xBB; 879 *wrb++ = 0xFF; 880 *wrb++ = 0xFF; 881 *wrb++ = 0xCC; 882 *wrb++ = 0xDD; 883 *wrb = 0xFF; 884 885 status = be_mbox_notify_wait(adapter); 886 887 mutex_unlock(&adapter->mbox_lock); 888 return status; 889 } 890 891 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) 892 { 893 struct be_mcc_wrb *wrb; 894 struct be_cmd_req_eq_create *req; 895 struct be_dma_mem *q_mem = &eqo->q.dma_mem; 896 int status, ver = 0; 897 898 if (mutex_lock_interruptible(&adapter->mbox_lock)) 899 return -1; 900 901 wrb = wrb_from_mbox(adapter); 902 req = embedded_payload(wrb); 903 904 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 905 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, 906 NULL); 907 908 /* Support for EQ_CREATEv2 available only SH-R onwards */ 909 if (!(BEx_chip(adapter) || lancer_chip(adapter))) 910 ver = 2; 911 912 req->hdr.version = ver; 913 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 914 915 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 916 /* 4byte eqe*/ 917 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 918 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 919 __ilog2_u32(eqo->q.len / 256)); 920 be_dws_cpu_to_le(req->context, sizeof(req->context)); 921 922 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 923 924 status = be_mbox_notify_wait(adapter); 925 if (!status) { 926 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 927 eqo->q.id = le16_to_cpu(resp->eq_id); 928 eqo->msix_idx = 929 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx; 930 eqo->q.created = true; 931 } 932 933 mutex_unlock(&adapter->mbox_lock); 934 return status; 935 } 936 937 /* Use MCC */ 938 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 939 bool permanent, u32 if_handle, u32 pmac_id) 940 { 941 struct be_mcc_wrb *wrb; 942 struct be_cmd_req_mac_query *req; 943 int status; 944 945 spin_lock_bh(&adapter->mcc_lock); 946 947 wrb = wrb_from_mccq(adapter); 948 if (!wrb) { 949 status = -EBUSY; 950 goto err; 951 } 952 req = embedded_payload(wrb); 953 954 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 955 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, 956 NULL); 957 req->type = MAC_ADDRESS_TYPE_NETWORK; 958 if (permanent) { 959 req->permanent = 1; 960 } else { 961 req->if_id = cpu_to_le16((u16) if_handle); 962 req->pmac_id = cpu_to_le32(pmac_id); 963 req->permanent = 0; 964 } 965 966 status = be_mcc_notify_wait(adapter); 967 if (!status) { 968 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 969 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 970 } 971 972 err: 973 spin_unlock_bh(&adapter->mcc_lock); 974 return status; 975 } 976 977 /* Uses synchronous MCCQ */ 978 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 979 u32 if_id, u32 *pmac_id, u32 domain) 980 { 981 struct be_mcc_wrb *wrb; 982 struct be_cmd_req_pmac_add *req; 983 int status; 984 985 spin_lock_bh(&adapter->mcc_lock); 986 987 wrb = wrb_from_mccq(adapter); 988 if (!wrb) { 989 status = -EBUSY; 990 goto err; 991 } 992 req = embedded_payload(wrb); 993 994 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 995 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, 996 NULL); 997 998 req->hdr.domain = domain; 999 req->if_id = cpu_to_le32(if_id); 1000 memcpy(req->mac_address, mac_addr, ETH_ALEN); 1001 1002 status = be_mcc_notify_wait(adapter); 1003 if (!status) { 1004 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); 1005 *pmac_id = le32_to_cpu(resp->pmac_id); 1006 } 1007 1008 err: 1009 spin_unlock_bh(&adapter->mcc_lock); 1010 1011 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 1012 status = -EPERM; 1013 1014 return status; 1015 } 1016 1017 /* Uses synchronous MCCQ */ 1018 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) 1019 { 1020 struct be_mcc_wrb *wrb; 1021 struct be_cmd_req_pmac_del *req; 1022 int status; 1023 1024 if (pmac_id == -1) 1025 return 0; 1026 1027 spin_lock_bh(&adapter->mcc_lock); 1028 1029 wrb = wrb_from_mccq(adapter); 1030 if (!wrb) { 1031 status = -EBUSY; 1032 goto err; 1033 } 1034 req = embedded_payload(wrb); 1035 1036 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1037 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); 1038 1039 req->hdr.domain = dom; 1040 req->if_id = cpu_to_le32(if_id); 1041 req->pmac_id = cpu_to_le32(pmac_id); 1042 1043 status = be_mcc_notify_wait(adapter); 1044 1045 err: 1046 spin_unlock_bh(&adapter->mcc_lock); 1047 return status; 1048 } 1049 1050 /* Uses Mbox */ 1051 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1052 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1053 { 1054 struct be_mcc_wrb *wrb; 1055 struct be_cmd_req_cq_create *req; 1056 struct be_dma_mem *q_mem = &cq->dma_mem; 1057 void *ctxt; 1058 int status; 1059 1060 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1061 return -1; 1062 1063 wrb = wrb_from_mbox(adapter); 1064 req = embedded_payload(wrb); 1065 ctxt = &req->context; 1066 1067 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1068 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, 1069 NULL); 1070 1071 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1072 1073 if (BEx_chip(adapter)) { 1074 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1075 coalesce_wm); 1076 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1077 ctxt, no_delay); 1078 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1079 __ilog2_u32(cq->len / 256)); 1080 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1081 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1082 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 1083 } else { 1084 req->hdr.version = 2; 1085 req->page_size = 1; /* 1 for 4K */ 1086 1087 /* coalesce-wm field in this cmd is not relevant to Lancer. 1088 * Lancer uses COMMON_MODIFY_CQ to set this field 1089 */ 1090 if (!lancer_chip(adapter)) 1091 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1092 ctxt, coalesce_wm); 1093 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1094 no_delay); 1095 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1096 __ilog2_u32(cq->len / 256)); 1097 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1098 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); 1099 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); 1100 } 1101 1102 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1103 1104 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1105 1106 status = be_mbox_notify_wait(adapter); 1107 if (!status) { 1108 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 1109 cq->id = le16_to_cpu(resp->cq_id); 1110 cq->created = true; 1111 } 1112 1113 mutex_unlock(&adapter->mbox_lock); 1114 1115 return status; 1116 } 1117 1118 static u32 be_encoded_q_len(int q_len) 1119 { 1120 u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 1121 if (len_encoded == 16) 1122 len_encoded = 0; 1123 return len_encoded; 1124 } 1125 1126 static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1127 struct be_queue_info *mccq, 1128 struct be_queue_info *cq) 1129 { 1130 struct be_mcc_wrb *wrb; 1131 struct be_cmd_req_mcc_ext_create *req; 1132 struct be_dma_mem *q_mem = &mccq->dma_mem; 1133 void *ctxt; 1134 int status; 1135 1136 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1137 return -1; 1138 1139 wrb = wrb_from_mbox(adapter); 1140 req = embedded_payload(wrb); 1141 ctxt = &req->context; 1142 1143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1144 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, 1145 NULL); 1146 1147 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1148 if (BEx_chip(adapter)) { 1149 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1150 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1151 be_encoded_q_len(mccq->len)); 1152 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1153 } else { 1154 req->hdr.version = 1; 1155 req->cq_id = cpu_to_le16(cq->id); 1156 1157 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, 1158 be_encoded_q_len(mccq->len)); 1159 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); 1160 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, 1161 ctxt, cq->id); 1162 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, 1163 ctxt, 1); 1164 } 1165 1166 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 1167 req->async_event_bitmap[0] = cpu_to_le32(0x00000022); 1168 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ); 1169 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1170 1171 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1172 1173 status = be_mbox_notify_wait(adapter); 1174 if (!status) { 1175 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1176 mccq->id = le16_to_cpu(resp->id); 1177 mccq->created = true; 1178 } 1179 mutex_unlock(&adapter->mbox_lock); 1180 1181 return status; 1182 } 1183 1184 static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1185 struct be_queue_info *mccq, 1186 struct be_queue_info *cq) 1187 { 1188 struct be_mcc_wrb *wrb; 1189 struct be_cmd_req_mcc_create *req; 1190 struct be_dma_mem *q_mem = &mccq->dma_mem; 1191 void *ctxt; 1192 int status; 1193 1194 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1195 return -1; 1196 1197 wrb = wrb_from_mbox(adapter); 1198 req = embedded_payload(wrb); 1199 ctxt = &req->context; 1200 1201 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1202 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, 1203 NULL); 1204 1205 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1206 1207 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1208 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1209 be_encoded_q_len(mccq->len)); 1210 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1211 1212 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1213 1214 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1215 1216 status = be_mbox_notify_wait(adapter); 1217 if (!status) { 1218 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1219 mccq->id = le16_to_cpu(resp->id); 1220 mccq->created = true; 1221 } 1222 1223 mutex_unlock(&adapter->mbox_lock); 1224 return status; 1225 } 1226 1227 int be_cmd_mccq_create(struct be_adapter *adapter, 1228 struct be_queue_info *mccq, struct be_queue_info *cq) 1229 { 1230 int status; 1231 1232 status = be_cmd_mccq_ext_create(adapter, mccq, cq); 1233 if (status && BEx_chip(adapter)) { 1234 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " 1235 "or newer to avoid conflicting priorities between NIC " 1236 "and FCoE traffic"); 1237 status = be_cmd_mccq_org_create(adapter, mccq, cq); 1238 } 1239 return status; 1240 } 1241 1242 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) 1243 { 1244 struct be_mcc_wrb wrb = {0}; 1245 struct be_cmd_req_eth_tx_create *req; 1246 struct be_queue_info *txq = &txo->q; 1247 struct be_queue_info *cq = &txo->cq; 1248 struct be_dma_mem *q_mem = &txq->dma_mem; 1249 int status, ver = 0; 1250 1251 req = embedded_payload(&wrb); 1252 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1253 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1254 1255 if (lancer_chip(adapter)) { 1256 req->hdr.version = 1; 1257 } else if (BEx_chip(adapter)) { 1258 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1259 req->hdr.version = 2; 1260 } else { /* For SH */ 1261 req->hdr.version = 2; 1262 } 1263 1264 if (req->hdr.version > 0) 1265 req->if_id = cpu_to_le16(adapter->if_handle); 1266 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1267 req->ulp_num = BE_ULP1_NUM; 1268 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1269 req->cq_id = cpu_to_le16(cq->id); 1270 req->queue_size = be_encoded_q_len(txq->len); 1271 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1272 ver = req->hdr.version; 1273 1274 status = be_cmd_notify_wait(adapter, &wrb); 1275 if (!status) { 1276 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); 1277 txq->id = le16_to_cpu(resp->cid); 1278 if (ver == 2) 1279 txo->db_offset = le32_to_cpu(resp->db_offset); 1280 else 1281 txo->db_offset = DB_TXULP1_OFFSET; 1282 txq->created = true; 1283 } 1284 1285 return status; 1286 } 1287 1288 /* Uses MCC */ 1289 int be_cmd_rxq_create(struct be_adapter *adapter, 1290 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1291 u32 if_id, u32 rss, u8 *rss_id) 1292 { 1293 struct be_mcc_wrb *wrb; 1294 struct be_cmd_req_eth_rx_create *req; 1295 struct be_dma_mem *q_mem = &rxq->dma_mem; 1296 int status; 1297 1298 spin_lock_bh(&adapter->mcc_lock); 1299 1300 wrb = wrb_from_mccq(adapter); 1301 if (!wrb) { 1302 status = -EBUSY; 1303 goto err; 1304 } 1305 req = embedded_payload(wrb); 1306 1307 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1308 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1309 1310 req->cq_id = cpu_to_le16(cq_id); 1311 req->frag_size = fls(frag_size) - 1; 1312 req->num_pages = 2; 1313 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1314 req->interface_id = cpu_to_le32(if_id); 1315 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); 1316 req->rss_queue = cpu_to_le32(rss); 1317 1318 status = be_mcc_notify_wait(adapter); 1319 if (!status) { 1320 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 1321 rxq->id = le16_to_cpu(resp->id); 1322 rxq->created = true; 1323 *rss_id = resp->rss_id; 1324 } 1325 1326 err: 1327 spin_unlock_bh(&adapter->mcc_lock); 1328 return status; 1329 } 1330 1331 /* Generic destroyer function for all types of queues 1332 * Uses Mbox 1333 */ 1334 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1335 int queue_type) 1336 { 1337 struct be_mcc_wrb *wrb; 1338 struct be_cmd_req_q_destroy *req; 1339 u8 subsys = 0, opcode = 0; 1340 int status; 1341 1342 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1343 return -1; 1344 1345 wrb = wrb_from_mbox(adapter); 1346 req = embedded_payload(wrb); 1347 1348 switch (queue_type) { 1349 case QTYPE_EQ: 1350 subsys = CMD_SUBSYSTEM_COMMON; 1351 opcode = OPCODE_COMMON_EQ_DESTROY; 1352 break; 1353 case QTYPE_CQ: 1354 subsys = CMD_SUBSYSTEM_COMMON; 1355 opcode = OPCODE_COMMON_CQ_DESTROY; 1356 break; 1357 case QTYPE_TXQ: 1358 subsys = CMD_SUBSYSTEM_ETH; 1359 opcode = OPCODE_ETH_TX_DESTROY; 1360 break; 1361 case QTYPE_RXQ: 1362 subsys = CMD_SUBSYSTEM_ETH; 1363 opcode = OPCODE_ETH_RX_DESTROY; 1364 break; 1365 case QTYPE_MCCQ: 1366 subsys = CMD_SUBSYSTEM_COMMON; 1367 opcode = OPCODE_COMMON_MCC_DESTROY; 1368 break; 1369 default: 1370 BUG(); 1371 } 1372 1373 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1374 NULL); 1375 req->id = cpu_to_le16(q->id); 1376 1377 status = be_mbox_notify_wait(adapter); 1378 q->created = false; 1379 1380 mutex_unlock(&adapter->mbox_lock); 1381 return status; 1382 } 1383 1384 /* Uses MCC */ 1385 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) 1386 { 1387 struct be_mcc_wrb *wrb; 1388 struct be_cmd_req_q_destroy *req; 1389 int status; 1390 1391 spin_lock_bh(&adapter->mcc_lock); 1392 1393 wrb = wrb_from_mccq(adapter); 1394 if (!wrb) { 1395 status = -EBUSY; 1396 goto err; 1397 } 1398 req = embedded_payload(wrb); 1399 1400 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1401 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1402 req->id = cpu_to_le16(q->id); 1403 1404 status = be_mcc_notify_wait(adapter); 1405 q->created = false; 1406 1407 err: 1408 spin_unlock_bh(&adapter->mcc_lock); 1409 return status; 1410 } 1411 1412 /* Create an rx filtering policy configuration on an i/f 1413 * Will use MBOX only if MCCQ has not been created. 1414 */ 1415 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1416 u32 *if_handle, u32 domain) 1417 { 1418 struct be_mcc_wrb wrb = {0}; 1419 struct be_cmd_req_if_create *req; 1420 int status; 1421 1422 req = embedded_payload(&wrb); 1423 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1424 OPCODE_COMMON_NTWK_INTERFACE_CREATE, 1425 sizeof(*req), &wrb, NULL); 1426 req->hdr.domain = domain; 1427 req->capability_flags = cpu_to_le32(cap_flags); 1428 req->enable_flags = cpu_to_le32(en_flags); 1429 req->pmac_invalid = true; 1430 1431 status = be_cmd_notify_wait(adapter, &wrb); 1432 if (!status) { 1433 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); 1434 *if_handle = le32_to_cpu(resp->interface_id); 1435 1436 /* Hack to retrieve VF's pmac-id on BE3 */ 1437 if (BE3_chip(adapter) && !be_physfn(adapter)) 1438 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); 1439 } 1440 return status; 1441 } 1442 1443 /* Uses MCCQ */ 1444 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) 1445 { 1446 struct be_mcc_wrb *wrb; 1447 struct be_cmd_req_if_destroy *req; 1448 int status; 1449 1450 if (interface_id == -1) 1451 return 0; 1452 1453 spin_lock_bh(&adapter->mcc_lock); 1454 1455 wrb = wrb_from_mccq(adapter); 1456 if (!wrb) { 1457 status = -EBUSY; 1458 goto err; 1459 } 1460 req = embedded_payload(wrb); 1461 1462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1463 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, 1464 sizeof(*req), wrb, NULL); 1465 req->hdr.domain = domain; 1466 req->interface_id = cpu_to_le32(interface_id); 1467 1468 status = be_mcc_notify_wait(adapter); 1469 err: 1470 spin_unlock_bh(&adapter->mcc_lock); 1471 return status; 1472 } 1473 1474 /* Get stats is a non embedded command: the request is not embedded inside 1475 * WRB but is a separate dma memory block 1476 * Uses asynchronous MCC 1477 */ 1478 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1479 { 1480 struct be_mcc_wrb *wrb; 1481 struct be_cmd_req_hdr *hdr; 1482 int status = 0; 1483 1484 spin_lock_bh(&adapter->mcc_lock); 1485 1486 wrb = wrb_from_mccq(adapter); 1487 if (!wrb) { 1488 status = -EBUSY; 1489 goto err; 1490 } 1491 hdr = nonemb_cmd->va; 1492 1493 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1494 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, 1495 nonemb_cmd); 1496 1497 /* version 1 of the cmd is not supported only by BE2 */ 1498 if (BE2_chip(adapter)) 1499 hdr->version = 0; 1500 if (BE3_chip(adapter) || lancer_chip(adapter)) 1501 hdr->version = 1; 1502 else 1503 hdr->version = 2; 1504 1505 be_mcc_notify(adapter); 1506 adapter->stats_cmd_sent = true; 1507 1508 err: 1509 spin_unlock_bh(&adapter->mcc_lock); 1510 return status; 1511 } 1512 1513 /* Lancer Stats */ 1514 int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1515 struct be_dma_mem *nonemb_cmd) 1516 { 1517 1518 struct be_mcc_wrb *wrb; 1519 struct lancer_cmd_req_pport_stats *req; 1520 int status = 0; 1521 1522 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS, 1523 CMD_SUBSYSTEM_ETH)) 1524 return -EPERM; 1525 1526 spin_lock_bh(&adapter->mcc_lock); 1527 1528 wrb = wrb_from_mccq(adapter); 1529 if (!wrb) { 1530 status = -EBUSY; 1531 goto err; 1532 } 1533 req = nonemb_cmd->va; 1534 1535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1536 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, 1537 wrb, nonemb_cmd); 1538 1539 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1540 req->cmd_params.params.reset_stats = 0; 1541 1542 be_mcc_notify(adapter); 1543 adapter->stats_cmd_sent = true; 1544 1545 err: 1546 spin_unlock_bh(&adapter->mcc_lock); 1547 return status; 1548 } 1549 1550 static int be_mac_to_link_speed(int mac_speed) 1551 { 1552 switch (mac_speed) { 1553 case PHY_LINK_SPEED_ZERO: 1554 return 0; 1555 case PHY_LINK_SPEED_10MBPS: 1556 return 10; 1557 case PHY_LINK_SPEED_100MBPS: 1558 return 100; 1559 case PHY_LINK_SPEED_1GBPS: 1560 return 1000; 1561 case PHY_LINK_SPEED_10GBPS: 1562 return 10000; 1563 case PHY_LINK_SPEED_20GBPS: 1564 return 20000; 1565 case PHY_LINK_SPEED_25GBPS: 1566 return 25000; 1567 case PHY_LINK_SPEED_40GBPS: 1568 return 40000; 1569 } 1570 return 0; 1571 } 1572 1573 /* Uses synchronous mcc 1574 * Returns link_speed in Mbps 1575 */ 1576 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, 1577 u8 *link_status, u32 dom) 1578 { 1579 struct be_mcc_wrb *wrb; 1580 struct be_cmd_req_link_status *req; 1581 int status; 1582 1583 spin_lock_bh(&adapter->mcc_lock); 1584 1585 if (link_status) 1586 *link_status = LINK_DOWN; 1587 1588 wrb = wrb_from_mccq(adapter); 1589 if (!wrb) { 1590 status = -EBUSY; 1591 goto err; 1592 } 1593 req = embedded_payload(wrb); 1594 1595 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1596 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, 1597 sizeof(*req), wrb, NULL); 1598 1599 /* version 1 of the cmd is not supported only by BE2 */ 1600 if (!BE2_chip(adapter)) 1601 req->hdr.version = 1; 1602 1603 req->hdr.domain = dom; 1604 1605 status = be_mcc_notify_wait(adapter); 1606 if (!status) { 1607 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1608 if (link_speed) { 1609 *link_speed = resp->link_speed ? 1610 le16_to_cpu(resp->link_speed) * 10 : 1611 be_mac_to_link_speed(resp->mac_speed); 1612 1613 if (!resp->logical_link_status) 1614 *link_speed = 0; 1615 } 1616 if (link_status) 1617 *link_status = resp->logical_link_status; 1618 } 1619 1620 err: 1621 spin_unlock_bh(&adapter->mcc_lock); 1622 return status; 1623 } 1624 1625 /* Uses synchronous mcc */ 1626 int be_cmd_get_die_temperature(struct be_adapter *adapter) 1627 { 1628 struct be_mcc_wrb *wrb; 1629 struct be_cmd_req_get_cntl_addnl_attribs *req; 1630 int status = 0; 1631 1632 spin_lock_bh(&adapter->mcc_lock); 1633 1634 wrb = wrb_from_mccq(adapter); 1635 if (!wrb) { 1636 status = -EBUSY; 1637 goto err; 1638 } 1639 req = embedded_payload(wrb); 1640 1641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1642 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, 1643 sizeof(*req), wrb, NULL); 1644 1645 be_mcc_notify(adapter); 1646 1647 err: 1648 spin_unlock_bh(&adapter->mcc_lock); 1649 return status; 1650 } 1651 1652 /* Uses synchronous mcc */ 1653 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) 1654 { 1655 struct be_mcc_wrb *wrb; 1656 struct be_cmd_req_get_fat *req; 1657 int status; 1658 1659 spin_lock_bh(&adapter->mcc_lock); 1660 1661 wrb = wrb_from_mccq(adapter); 1662 if (!wrb) { 1663 status = -EBUSY; 1664 goto err; 1665 } 1666 req = embedded_payload(wrb); 1667 1668 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1669 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, 1670 NULL); 1671 req->fat_operation = cpu_to_le32(QUERY_FAT); 1672 status = be_mcc_notify_wait(adapter); 1673 if (!status) { 1674 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); 1675 if (log_size && resp->log_size) 1676 *log_size = le32_to_cpu(resp->log_size) - 1677 sizeof(u32); 1678 } 1679 err: 1680 spin_unlock_bh(&adapter->mcc_lock); 1681 return status; 1682 } 1683 1684 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) 1685 { 1686 struct be_dma_mem get_fat_cmd; 1687 struct be_mcc_wrb *wrb; 1688 struct be_cmd_req_get_fat *req; 1689 u32 offset = 0, total_size, buf_size, 1690 log_offset = sizeof(u32), payload_len; 1691 int status; 1692 1693 if (buf_len == 0) 1694 return; 1695 1696 total_size = buf_len; 1697 1698 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1699 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1700 get_fat_cmd.size, 1701 &get_fat_cmd.dma); 1702 if (!get_fat_cmd.va) { 1703 status = -ENOMEM; 1704 dev_err(&adapter->pdev->dev, 1705 "Memory allocation failure while retrieving FAT data\n"); 1706 return; 1707 } 1708 1709 spin_lock_bh(&adapter->mcc_lock); 1710 1711 while (total_size) { 1712 buf_size = min(total_size, (u32)60*1024); 1713 total_size -= buf_size; 1714 1715 wrb = wrb_from_mccq(adapter); 1716 if (!wrb) { 1717 status = -EBUSY; 1718 goto err; 1719 } 1720 req = get_fat_cmd.va; 1721 1722 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1723 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1724 OPCODE_COMMON_MANAGE_FAT, payload_len, 1725 wrb, &get_fat_cmd); 1726 1727 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1728 req->read_log_offset = cpu_to_le32(log_offset); 1729 req->read_log_length = cpu_to_le32(buf_size); 1730 req->data_buffer_size = cpu_to_le32(buf_size); 1731 1732 status = be_mcc_notify_wait(adapter); 1733 if (!status) { 1734 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1735 memcpy(buf + offset, 1736 resp->data_buffer, 1737 le32_to_cpu(resp->read_log_length)); 1738 } else { 1739 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1740 goto err; 1741 } 1742 offset += buf_size; 1743 log_offset += buf_size; 1744 } 1745 err: 1746 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1747 get_fat_cmd.va, get_fat_cmd.dma); 1748 spin_unlock_bh(&adapter->mcc_lock); 1749 } 1750 1751 /* Uses synchronous mcc */ 1752 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1753 char *fw_on_flash) 1754 { 1755 struct be_mcc_wrb *wrb; 1756 struct be_cmd_req_get_fw_version *req; 1757 int status; 1758 1759 spin_lock_bh(&adapter->mcc_lock); 1760 1761 wrb = wrb_from_mccq(adapter); 1762 if (!wrb) { 1763 status = -EBUSY; 1764 goto err; 1765 } 1766 1767 req = embedded_payload(wrb); 1768 1769 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1770 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, 1771 NULL); 1772 status = be_mcc_notify_wait(adapter); 1773 if (!status) { 1774 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1775 strcpy(fw_ver, resp->firmware_version_string); 1776 if (fw_on_flash) 1777 strcpy(fw_on_flash, resp->fw_on_flash_version_string); 1778 } 1779 err: 1780 spin_unlock_bh(&adapter->mcc_lock); 1781 return status; 1782 } 1783 1784 /* set the EQ delay interval of an EQ to specified value 1785 * Uses async mcc 1786 */ 1787 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, 1788 int num) 1789 { 1790 struct be_mcc_wrb *wrb; 1791 struct be_cmd_req_modify_eq_delay *req; 1792 int status = 0, i; 1793 1794 spin_lock_bh(&adapter->mcc_lock); 1795 1796 wrb = wrb_from_mccq(adapter); 1797 if (!wrb) { 1798 status = -EBUSY; 1799 goto err; 1800 } 1801 req = embedded_payload(wrb); 1802 1803 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1804 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, 1805 NULL); 1806 1807 req->num_eq = cpu_to_le32(num); 1808 for (i = 0; i < num; i++) { 1809 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); 1810 req->set_eqd[i].phase = 0; 1811 req->set_eqd[i].delay_multiplier = 1812 cpu_to_le32(set_eqd[i].delay_multiplier); 1813 } 1814 1815 be_mcc_notify(adapter); 1816 err: 1817 spin_unlock_bh(&adapter->mcc_lock); 1818 return status; 1819 } 1820 1821 /* Uses sycnhronous mcc */ 1822 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1823 u32 num) 1824 { 1825 struct be_mcc_wrb *wrb; 1826 struct be_cmd_req_vlan_config *req; 1827 int status; 1828 1829 spin_lock_bh(&adapter->mcc_lock); 1830 1831 wrb = wrb_from_mccq(adapter); 1832 if (!wrb) { 1833 status = -EBUSY; 1834 goto err; 1835 } 1836 req = embedded_payload(wrb); 1837 1838 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1839 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1840 wrb, NULL); 1841 1842 req->interface_id = if_id; 1843 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1844 req->num_vlan = num; 1845 memcpy(req->normal_vlan, vtag_array, 1846 req->num_vlan * sizeof(vtag_array[0])); 1847 1848 status = be_mcc_notify_wait(adapter); 1849 err: 1850 spin_unlock_bh(&adapter->mcc_lock); 1851 return status; 1852 } 1853 1854 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) 1855 { 1856 struct be_mcc_wrb *wrb; 1857 struct be_dma_mem *mem = &adapter->rx_filter; 1858 struct be_cmd_req_rx_filter *req = mem->va; 1859 int status; 1860 1861 spin_lock_bh(&adapter->mcc_lock); 1862 1863 wrb = wrb_from_mccq(adapter); 1864 if (!wrb) { 1865 status = -EBUSY; 1866 goto err; 1867 } 1868 memset(req, 0, sizeof(*req)); 1869 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1870 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1871 wrb, mem); 1872 1873 req->if_id = cpu_to_le32(adapter->if_handle); 1874 if (flags & IFF_PROMISC) { 1875 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1876 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1877 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1878 if (value == ON) 1879 req->if_flags = 1880 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1881 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1882 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1883 } else if (flags & IFF_ALLMULTI) { 1884 req->if_flags_mask = req->if_flags = 1885 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1886 } else if (flags & BE_FLAGS_VLAN_PROMISC) { 1887 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); 1888 1889 if (value == ON) 1890 req->if_flags = 1891 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); 1892 } else { 1893 struct netdev_hw_addr *ha; 1894 int i = 0; 1895 1896 req->if_flags_mask = req->if_flags = 1897 cpu_to_le32(BE_IF_FLAGS_MULTICAST); 1898 1899 /* Reset mcast promisc mode if already set by setting mask 1900 * and not setting flags field 1901 */ 1902 req->if_flags_mask |= 1903 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & 1904 be_if_cap_flags(adapter)); 1905 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1906 netdev_for_each_mc_addr(ha, adapter->netdev) 1907 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1908 } 1909 1910 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != 1911 req->if_flags_mask) { 1912 dev_warn(&adapter->pdev->dev, 1913 "Cannot set rx filter flags 0x%x\n", 1914 req->if_flags_mask); 1915 dev_warn(&adapter->pdev->dev, 1916 "Interface is capable of 0x%x flags only\n", 1917 be_if_cap_flags(adapter)); 1918 } 1919 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); 1920 1921 status = be_mcc_notify_wait(adapter); 1922 1923 err: 1924 spin_unlock_bh(&adapter->mcc_lock); 1925 return status; 1926 } 1927 1928 /* Uses synchrounous mcc */ 1929 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) 1930 { 1931 struct be_mcc_wrb *wrb; 1932 struct be_cmd_req_set_flow_control *req; 1933 int status; 1934 1935 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL, 1936 CMD_SUBSYSTEM_COMMON)) 1937 return -EPERM; 1938 1939 spin_lock_bh(&adapter->mcc_lock); 1940 1941 wrb = wrb_from_mccq(adapter); 1942 if (!wrb) { 1943 status = -EBUSY; 1944 goto err; 1945 } 1946 req = embedded_payload(wrb); 1947 1948 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1949 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), 1950 wrb, NULL); 1951 1952 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1953 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 1954 1955 status = be_mcc_notify_wait(adapter); 1956 1957 err: 1958 spin_unlock_bh(&adapter->mcc_lock); 1959 return status; 1960 } 1961 1962 /* Uses sycn mcc */ 1963 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) 1964 { 1965 struct be_mcc_wrb *wrb; 1966 struct be_cmd_req_get_flow_control *req; 1967 int status; 1968 1969 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL, 1970 CMD_SUBSYSTEM_COMMON)) 1971 return -EPERM; 1972 1973 spin_lock_bh(&adapter->mcc_lock); 1974 1975 wrb = wrb_from_mccq(adapter); 1976 if (!wrb) { 1977 status = -EBUSY; 1978 goto err; 1979 } 1980 req = embedded_payload(wrb); 1981 1982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1983 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), 1984 wrb, NULL); 1985 1986 status = be_mcc_notify_wait(adapter); 1987 if (!status) { 1988 struct be_cmd_resp_get_flow_control *resp = 1989 embedded_payload(wrb); 1990 *tx_fc = le16_to_cpu(resp->tx_flow_control); 1991 *rx_fc = le16_to_cpu(resp->rx_flow_control); 1992 } 1993 1994 err: 1995 spin_unlock_bh(&adapter->mcc_lock); 1996 return status; 1997 } 1998 1999 /* Uses mbox */ 2000 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, 2001 u32 *mode, u32 *caps, u16 *asic_rev) 2002 { 2003 struct be_mcc_wrb *wrb; 2004 struct be_cmd_req_query_fw_cfg *req; 2005 int status; 2006 2007 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2008 return -1; 2009 2010 wrb = wrb_from_mbox(adapter); 2011 req = embedded_payload(wrb); 2012 2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2014 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, 2015 sizeof(*req), wrb, NULL); 2016 2017 status = be_mbox_notify_wait(adapter); 2018 if (!status) { 2019 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 2020 *port_num = le32_to_cpu(resp->phys_port); 2021 *mode = le32_to_cpu(resp->function_mode); 2022 *caps = le32_to_cpu(resp->function_caps); 2023 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; 2024 } 2025 2026 mutex_unlock(&adapter->mbox_lock); 2027 return status; 2028 } 2029 2030 /* Uses mbox */ 2031 int be_cmd_reset_function(struct be_adapter *adapter) 2032 { 2033 struct be_mcc_wrb *wrb; 2034 struct be_cmd_req_hdr *req; 2035 int status; 2036 2037 if (lancer_chip(adapter)) { 2038 status = lancer_wait_ready(adapter); 2039 if (!status) { 2040 iowrite32(SLI_PORT_CONTROL_IP_MASK, 2041 adapter->db + SLIPORT_CONTROL_OFFSET); 2042 status = lancer_test_and_set_rdy_state(adapter); 2043 } 2044 if (status) { 2045 dev_err(&adapter->pdev->dev, 2046 "Adapter in non recoverable error\n"); 2047 } 2048 return status; 2049 } 2050 2051 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2052 return -1; 2053 2054 wrb = wrb_from_mbox(adapter); 2055 req = embedded_payload(wrb); 2056 2057 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2058 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, 2059 NULL); 2060 2061 status = be_mbox_notify_wait(adapter); 2062 2063 mutex_unlock(&adapter->mbox_lock); 2064 return status; 2065 } 2066 2067 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2068 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey) 2069 { 2070 struct be_mcc_wrb *wrb; 2071 struct be_cmd_req_rss_config *req; 2072 int status; 2073 2074 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2075 return 0; 2076 2077 spin_lock_bh(&adapter->mcc_lock); 2078 2079 wrb = wrb_from_mccq(adapter); 2080 if (!wrb) { 2081 status = -EBUSY; 2082 goto err; 2083 } 2084 req = embedded_payload(wrb); 2085 2086 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2087 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2088 2089 req->if_id = cpu_to_le32(adapter->if_handle); 2090 req->enable_rss = cpu_to_le16(rss_hash_opts); 2091 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2092 2093 if (!BEx_chip(adapter)) 2094 req->hdr.version = 1; 2095 2096 memcpy(req->cpu_table, rsstable, table_size); 2097 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); 2098 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2099 2100 status = be_mcc_notify_wait(adapter); 2101 err: 2102 spin_unlock_bh(&adapter->mcc_lock); 2103 return status; 2104 } 2105 2106 /* Uses sync mcc */ 2107 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2108 u8 bcn, u8 sts, u8 state) 2109 { 2110 struct be_mcc_wrb *wrb; 2111 struct be_cmd_req_enable_disable_beacon *req; 2112 int status; 2113 2114 spin_lock_bh(&adapter->mcc_lock); 2115 2116 wrb = wrb_from_mccq(adapter); 2117 if (!wrb) { 2118 status = -EBUSY; 2119 goto err; 2120 } 2121 req = embedded_payload(wrb); 2122 2123 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2124 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2125 sizeof(*req), wrb, NULL); 2126 2127 req->port_num = port_num; 2128 req->beacon_state = state; 2129 req->beacon_duration = bcn; 2130 req->status_duration = sts; 2131 2132 status = be_mcc_notify_wait(adapter); 2133 2134 err: 2135 spin_unlock_bh(&adapter->mcc_lock); 2136 return status; 2137 } 2138 2139 /* Uses sync mcc */ 2140 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2141 { 2142 struct be_mcc_wrb *wrb; 2143 struct be_cmd_req_get_beacon_state *req; 2144 int status; 2145 2146 spin_lock_bh(&adapter->mcc_lock); 2147 2148 wrb = wrb_from_mccq(adapter); 2149 if (!wrb) { 2150 status = -EBUSY; 2151 goto err; 2152 } 2153 req = embedded_payload(wrb); 2154 2155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2156 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2157 wrb, NULL); 2158 2159 req->port_num = port_num; 2160 2161 status = be_mcc_notify_wait(adapter); 2162 if (!status) { 2163 struct be_cmd_resp_get_beacon_state *resp = 2164 embedded_payload(wrb); 2165 *state = resp->beacon_state; 2166 } 2167 2168 err: 2169 spin_unlock_bh(&adapter->mcc_lock); 2170 return status; 2171 } 2172 2173 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2174 u32 data_size, u32 data_offset, 2175 const char *obj_name, u32 *data_written, 2176 u8 *change_status, u8 *addn_status) 2177 { 2178 struct be_mcc_wrb *wrb; 2179 struct lancer_cmd_req_write_object *req; 2180 struct lancer_cmd_resp_write_object *resp; 2181 void *ctxt = NULL; 2182 int status; 2183 2184 spin_lock_bh(&adapter->mcc_lock); 2185 adapter->flash_status = 0; 2186 2187 wrb = wrb_from_mccq(adapter); 2188 if (!wrb) { 2189 status = -EBUSY; 2190 goto err_unlock; 2191 } 2192 2193 req = embedded_payload(wrb); 2194 2195 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2196 OPCODE_COMMON_WRITE_OBJECT, 2197 sizeof(struct lancer_cmd_req_write_object), wrb, 2198 NULL); 2199 2200 ctxt = &req->context; 2201 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2202 write_length, ctxt, data_size); 2203 2204 if (data_size == 0) 2205 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2206 eof, ctxt, 1); 2207 else 2208 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2209 eof, ctxt, 0); 2210 2211 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2212 req->write_offset = cpu_to_le32(data_offset); 2213 strcpy(req->object_name, obj_name); 2214 req->descriptor_count = cpu_to_le32(1); 2215 req->buf_len = cpu_to_le32(data_size); 2216 req->addr_low = cpu_to_le32((cmd->dma + 2217 sizeof(struct lancer_cmd_req_write_object)) 2218 & 0xFFFFFFFF); 2219 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2220 sizeof(struct lancer_cmd_req_write_object))); 2221 2222 be_mcc_notify(adapter); 2223 spin_unlock_bh(&adapter->mcc_lock); 2224 2225 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2226 msecs_to_jiffies(60000))) 2227 status = -1; 2228 else 2229 status = adapter->flash_status; 2230 2231 resp = embedded_payload(wrb); 2232 if (!status) { 2233 *data_written = le32_to_cpu(resp->actual_write_len); 2234 *change_status = resp->change_status; 2235 } else { 2236 *addn_status = resp->additional_status; 2237 } 2238 2239 return status; 2240 2241 err_unlock: 2242 spin_unlock_bh(&adapter->mcc_lock); 2243 return status; 2244 } 2245 2246 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2247 u32 data_size, u32 data_offset, const char *obj_name, 2248 u32 *data_read, u32 *eof, u8 *addn_status) 2249 { 2250 struct be_mcc_wrb *wrb; 2251 struct lancer_cmd_req_read_object *req; 2252 struct lancer_cmd_resp_read_object *resp; 2253 int status; 2254 2255 spin_lock_bh(&adapter->mcc_lock); 2256 2257 wrb = wrb_from_mccq(adapter); 2258 if (!wrb) { 2259 status = -EBUSY; 2260 goto err_unlock; 2261 } 2262 2263 req = embedded_payload(wrb); 2264 2265 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2266 OPCODE_COMMON_READ_OBJECT, 2267 sizeof(struct lancer_cmd_req_read_object), wrb, 2268 NULL); 2269 2270 req->desired_read_len = cpu_to_le32(data_size); 2271 req->read_offset = cpu_to_le32(data_offset); 2272 strcpy(req->object_name, obj_name); 2273 req->descriptor_count = cpu_to_le32(1); 2274 req->buf_len = cpu_to_le32(data_size); 2275 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2276 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2277 2278 status = be_mcc_notify_wait(adapter); 2279 2280 resp = embedded_payload(wrb); 2281 if (!status) { 2282 *data_read = le32_to_cpu(resp->actual_read_len); 2283 *eof = le32_to_cpu(resp->eof); 2284 } else { 2285 *addn_status = resp->additional_status; 2286 } 2287 2288 err_unlock: 2289 spin_unlock_bh(&adapter->mcc_lock); 2290 return status; 2291 } 2292 2293 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2294 u32 flash_type, u32 flash_opcode, u32 buf_size) 2295 { 2296 struct be_mcc_wrb *wrb; 2297 struct be_cmd_write_flashrom *req; 2298 int status; 2299 2300 spin_lock_bh(&adapter->mcc_lock); 2301 adapter->flash_status = 0; 2302 2303 wrb = wrb_from_mccq(adapter); 2304 if (!wrb) { 2305 status = -EBUSY; 2306 goto err_unlock; 2307 } 2308 req = cmd->va; 2309 2310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2311 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2312 cmd); 2313 2314 req->params.op_type = cpu_to_le32(flash_type); 2315 req->params.op_code = cpu_to_le32(flash_opcode); 2316 req->params.data_buf_size = cpu_to_le32(buf_size); 2317 2318 be_mcc_notify(adapter); 2319 spin_unlock_bh(&adapter->mcc_lock); 2320 2321 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2322 msecs_to_jiffies(40000))) 2323 status = -1; 2324 else 2325 status = adapter->flash_status; 2326 2327 return status; 2328 2329 err_unlock: 2330 spin_unlock_bh(&adapter->mcc_lock); 2331 return status; 2332 } 2333 2334 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2335 u16 optype, int offset) 2336 { 2337 struct be_mcc_wrb *wrb; 2338 struct be_cmd_read_flash_crc *req; 2339 int status; 2340 2341 spin_lock_bh(&adapter->mcc_lock); 2342 2343 wrb = wrb_from_mccq(adapter); 2344 if (!wrb) { 2345 status = -EBUSY; 2346 goto err; 2347 } 2348 req = embedded_payload(wrb); 2349 2350 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2351 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2352 wrb, NULL); 2353 2354 req->params.op_type = cpu_to_le32(optype); 2355 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2356 req->params.offset = cpu_to_le32(offset); 2357 req->params.data_buf_size = cpu_to_le32(0x4); 2358 2359 status = be_mcc_notify_wait(adapter); 2360 if (!status) 2361 memcpy(flashed_crc, req->crc, 4); 2362 2363 err: 2364 spin_unlock_bh(&adapter->mcc_lock); 2365 return status; 2366 } 2367 2368 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2369 struct be_dma_mem *nonemb_cmd) 2370 { 2371 struct be_mcc_wrb *wrb; 2372 struct be_cmd_req_acpi_wol_magic_config *req; 2373 int status; 2374 2375 spin_lock_bh(&adapter->mcc_lock); 2376 2377 wrb = wrb_from_mccq(adapter); 2378 if (!wrb) { 2379 status = -EBUSY; 2380 goto err; 2381 } 2382 req = nonemb_cmd->va; 2383 2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2385 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2386 wrb, nonemb_cmd); 2387 memcpy(req->magic_mac, mac, ETH_ALEN); 2388 2389 status = be_mcc_notify_wait(adapter); 2390 2391 err: 2392 spin_unlock_bh(&adapter->mcc_lock); 2393 return status; 2394 } 2395 2396 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2397 u8 loopback_type, u8 enable) 2398 { 2399 struct be_mcc_wrb *wrb; 2400 struct be_cmd_req_set_lmode *req; 2401 int status; 2402 2403 spin_lock_bh(&adapter->mcc_lock); 2404 2405 wrb = wrb_from_mccq(adapter); 2406 if (!wrb) { 2407 status = -EBUSY; 2408 goto err; 2409 } 2410 2411 req = embedded_payload(wrb); 2412 2413 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2414 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2415 wrb, NULL); 2416 2417 req->src_port = port_num; 2418 req->dest_port = port_num; 2419 req->loopback_type = loopback_type; 2420 req->loopback_state = enable; 2421 2422 status = be_mcc_notify_wait(adapter); 2423 err: 2424 spin_unlock_bh(&adapter->mcc_lock); 2425 return status; 2426 } 2427 2428 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2429 u32 loopback_type, u32 pkt_size, u32 num_pkts, 2430 u64 pattern) 2431 { 2432 struct be_mcc_wrb *wrb; 2433 struct be_cmd_req_loopback_test *req; 2434 struct be_cmd_resp_loopback_test *resp; 2435 int status; 2436 2437 spin_lock_bh(&adapter->mcc_lock); 2438 2439 wrb = wrb_from_mccq(adapter); 2440 if (!wrb) { 2441 status = -EBUSY; 2442 goto err; 2443 } 2444 2445 req = embedded_payload(wrb); 2446 2447 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2448 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2449 NULL); 2450 2451 req->hdr.timeout = cpu_to_le32(15); 2452 req->pattern = cpu_to_le64(pattern); 2453 req->src_port = cpu_to_le32(port_num); 2454 req->dest_port = cpu_to_le32(port_num); 2455 req->pkt_size = cpu_to_le32(pkt_size); 2456 req->num_pkts = cpu_to_le32(num_pkts); 2457 req->loopback_type = cpu_to_le32(loopback_type); 2458 2459 be_mcc_notify(adapter); 2460 2461 spin_unlock_bh(&adapter->mcc_lock); 2462 2463 wait_for_completion(&adapter->et_cmd_compl); 2464 resp = embedded_payload(wrb); 2465 status = le32_to_cpu(resp->status); 2466 2467 return status; 2468 err: 2469 spin_unlock_bh(&adapter->mcc_lock); 2470 return status; 2471 } 2472 2473 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2474 u32 byte_cnt, struct be_dma_mem *cmd) 2475 { 2476 struct be_mcc_wrb *wrb; 2477 struct be_cmd_req_ddrdma_test *req; 2478 int status; 2479 int i, j = 0; 2480 2481 spin_lock_bh(&adapter->mcc_lock); 2482 2483 wrb = wrb_from_mccq(adapter); 2484 if (!wrb) { 2485 status = -EBUSY; 2486 goto err; 2487 } 2488 req = cmd->va; 2489 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2490 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2491 cmd); 2492 2493 req->pattern = cpu_to_le64(pattern); 2494 req->byte_count = cpu_to_le32(byte_cnt); 2495 for (i = 0; i < byte_cnt; i++) { 2496 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2497 j++; 2498 if (j > 7) 2499 j = 0; 2500 } 2501 2502 status = be_mcc_notify_wait(adapter); 2503 2504 if (!status) { 2505 struct be_cmd_resp_ddrdma_test *resp; 2506 resp = cmd->va; 2507 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2508 resp->snd_err) { 2509 status = -1; 2510 } 2511 } 2512 2513 err: 2514 spin_unlock_bh(&adapter->mcc_lock); 2515 return status; 2516 } 2517 2518 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2519 struct be_dma_mem *nonemb_cmd) 2520 { 2521 struct be_mcc_wrb *wrb; 2522 struct be_cmd_req_seeprom_read *req; 2523 int status; 2524 2525 spin_lock_bh(&adapter->mcc_lock); 2526 2527 wrb = wrb_from_mccq(adapter); 2528 if (!wrb) { 2529 status = -EBUSY; 2530 goto err; 2531 } 2532 req = nonemb_cmd->va; 2533 2534 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2535 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2536 nonemb_cmd); 2537 2538 status = be_mcc_notify_wait(adapter); 2539 2540 err: 2541 spin_unlock_bh(&adapter->mcc_lock); 2542 return status; 2543 } 2544 2545 int be_cmd_get_phy_info(struct be_adapter *adapter) 2546 { 2547 struct be_mcc_wrb *wrb; 2548 struct be_cmd_req_get_phy_info *req; 2549 struct be_dma_mem cmd; 2550 int status; 2551 2552 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 2553 CMD_SUBSYSTEM_COMMON)) 2554 return -EPERM; 2555 2556 spin_lock_bh(&adapter->mcc_lock); 2557 2558 wrb = wrb_from_mccq(adapter); 2559 if (!wrb) { 2560 status = -EBUSY; 2561 goto err; 2562 } 2563 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2564 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2565 if (!cmd.va) { 2566 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2567 status = -ENOMEM; 2568 goto err; 2569 } 2570 2571 req = cmd.va; 2572 2573 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2574 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2575 wrb, &cmd); 2576 2577 status = be_mcc_notify_wait(adapter); 2578 if (!status) { 2579 struct be_phy_info *resp_phy_info = 2580 cmd.va + sizeof(struct be_cmd_req_hdr); 2581 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2582 adapter->phy.interface_type = 2583 le16_to_cpu(resp_phy_info->interface_type); 2584 adapter->phy.auto_speeds_supported = 2585 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2586 adapter->phy.fixed_speeds_supported = 2587 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2588 adapter->phy.misc_params = 2589 le32_to_cpu(resp_phy_info->misc_params); 2590 2591 if (BE2_chip(adapter)) { 2592 adapter->phy.fixed_speeds_supported = 2593 BE_SUPPORTED_SPEED_10GBPS | 2594 BE_SUPPORTED_SPEED_1GBPS; 2595 } 2596 } 2597 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2598 err: 2599 spin_unlock_bh(&adapter->mcc_lock); 2600 return status; 2601 } 2602 2603 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2604 { 2605 struct be_mcc_wrb *wrb; 2606 struct be_cmd_req_set_qos *req; 2607 int status; 2608 2609 spin_lock_bh(&adapter->mcc_lock); 2610 2611 wrb = wrb_from_mccq(adapter); 2612 if (!wrb) { 2613 status = -EBUSY; 2614 goto err; 2615 } 2616 2617 req = embedded_payload(wrb); 2618 2619 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2620 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2621 2622 req->hdr.domain = domain; 2623 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2624 req->max_bps_nic = cpu_to_le32(bps); 2625 2626 status = be_mcc_notify_wait(adapter); 2627 2628 err: 2629 spin_unlock_bh(&adapter->mcc_lock); 2630 return status; 2631 } 2632 2633 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2634 { 2635 struct be_mcc_wrb *wrb; 2636 struct be_cmd_req_cntl_attribs *req; 2637 struct be_cmd_resp_cntl_attribs *resp; 2638 int status; 2639 int payload_len = max(sizeof(*req), sizeof(*resp)); 2640 struct mgmt_controller_attrib *attribs; 2641 struct be_dma_mem attribs_cmd; 2642 2643 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2644 return -1; 2645 2646 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2647 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2648 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2649 &attribs_cmd.dma); 2650 if (!attribs_cmd.va) { 2651 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2652 status = -ENOMEM; 2653 goto err; 2654 } 2655 2656 wrb = wrb_from_mbox(adapter); 2657 if (!wrb) { 2658 status = -EBUSY; 2659 goto err; 2660 } 2661 req = attribs_cmd.va; 2662 2663 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2664 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2665 wrb, &attribs_cmd); 2666 2667 status = be_mbox_notify_wait(adapter); 2668 if (!status) { 2669 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2670 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2671 } 2672 2673 err: 2674 mutex_unlock(&adapter->mbox_lock); 2675 if (attribs_cmd.va) 2676 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2677 attribs_cmd.va, attribs_cmd.dma); 2678 return status; 2679 } 2680 2681 /* Uses mbox */ 2682 int be_cmd_req_native_mode(struct be_adapter *adapter) 2683 { 2684 struct be_mcc_wrb *wrb; 2685 struct be_cmd_req_set_func_cap *req; 2686 int status; 2687 2688 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2689 return -1; 2690 2691 wrb = wrb_from_mbox(adapter); 2692 if (!wrb) { 2693 status = -EBUSY; 2694 goto err; 2695 } 2696 2697 req = embedded_payload(wrb); 2698 2699 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2700 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2701 sizeof(*req), wrb, NULL); 2702 2703 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2704 CAPABILITY_BE3_NATIVE_ERX_API); 2705 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2706 2707 status = be_mbox_notify_wait(adapter); 2708 if (!status) { 2709 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2710 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2711 CAPABILITY_BE3_NATIVE_ERX_API; 2712 if (!adapter->be3_native) 2713 dev_warn(&adapter->pdev->dev, 2714 "adapter not in advanced mode\n"); 2715 } 2716 err: 2717 mutex_unlock(&adapter->mbox_lock); 2718 return status; 2719 } 2720 2721 /* Get privilege(s) for a function */ 2722 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2723 u32 domain) 2724 { 2725 struct be_mcc_wrb *wrb; 2726 struct be_cmd_req_get_fn_privileges *req; 2727 int status; 2728 2729 spin_lock_bh(&adapter->mcc_lock); 2730 2731 wrb = wrb_from_mccq(adapter); 2732 if (!wrb) { 2733 status = -EBUSY; 2734 goto err; 2735 } 2736 2737 req = embedded_payload(wrb); 2738 2739 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2740 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 2741 wrb, NULL); 2742 2743 req->hdr.domain = domain; 2744 2745 status = be_mcc_notify_wait(adapter); 2746 if (!status) { 2747 struct be_cmd_resp_get_fn_privileges *resp = 2748 embedded_payload(wrb); 2749 *privilege = le32_to_cpu(resp->privilege_mask); 2750 2751 /* In UMC mode FW does not return right privileges. 2752 * Override with correct privilege equivalent to PF. 2753 */ 2754 if (BEx_chip(adapter) && be_is_mc(adapter) && 2755 be_physfn(adapter)) 2756 *privilege = MAX_PRIVILEGES; 2757 } 2758 2759 err: 2760 spin_unlock_bh(&adapter->mcc_lock); 2761 return status; 2762 } 2763 2764 /* Set privilege(s) for a function */ 2765 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2766 u32 domain) 2767 { 2768 struct be_mcc_wrb *wrb; 2769 struct be_cmd_req_set_fn_privileges *req; 2770 int status; 2771 2772 spin_lock_bh(&adapter->mcc_lock); 2773 2774 wrb = wrb_from_mccq(adapter); 2775 if (!wrb) { 2776 status = -EBUSY; 2777 goto err; 2778 } 2779 2780 req = embedded_payload(wrb); 2781 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2782 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 2783 wrb, NULL); 2784 req->hdr.domain = domain; 2785 if (lancer_chip(adapter)) 2786 req->privileges_lancer = cpu_to_le32(privileges); 2787 else 2788 req->privileges = cpu_to_le32(privileges); 2789 2790 status = be_mcc_notify_wait(adapter); 2791 err: 2792 spin_unlock_bh(&adapter->mcc_lock); 2793 return status; 2794 } 2795 2796 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 2797 * pmac_id_valid: false => pmac_id or MAC address is requested. 2798 * If pmac_id is returned, pmac_id_valid is returned as true 2799 */ 2800 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2801 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 2802 u8 domain) 2803 { 2804 struct be_mcc_wrb *wrb; 2805 struct be_cmd_req_get_mac_list *req; 2806 int status; 2807 int mac_count; 2808 struct be_dma_mem get_mac_list_cmd; 2809 int i; 2810 2811 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2812 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2813 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2814 get_mac_list_cmd.size, 2815 &get_mac_list_cmd.dma); 2816 2817 if (!get_mac_list_cmd.va) { 2818 dev_err(&adapter->pdev->dev, 2819 "Memory allocation failure during GET_MAC_LIST\n"); 2820 return -ENOMEM; 2821 } 2822 2823 spin_lock_bh(&adapter->mcc_lock); 2824 2825 wrb = wrb_from_mccq(adapter); 2826 if (!wrb) { 2827 status = -EBUSY; 2828 goto out; 2829 } 2830 2831 req = get_mac_list_cmd.va; 2832 2833 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2834 OPCODE_COMMON_GET_MAC_LIST, 2835 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 2836 req->hdr.domain = domain; 2837 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 2838 if (*pmac_id_valid) { 2839 req->mac_id = cpu_to_le32(*pmac_id); 2840 req->iface_id = cpu_to_le16(if_handle); 2841 req->perm_override = 0; 2842 } else { 2843 req->perm_override = 1; 2844 } 2845 2846 status = be_mcc_notify_wait(adapter); 2847 if (!status) { 2848 struct be_cmd_resp_get_mac_list *resp = 2849 get_mac_list_cmd.va; 2850 2851 if (*pmac_id_valid) { 2852 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 2853 ETH_ALEN); 2854 goto out; 2855 } 2856 2857 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2858 /* Mac list returned could contain one or more active mac_ids 2859 * or one or more true or pseudo permanant mac addresses. 2860 * If an active mac_id is present, return first active mac_id 2861 * found. 2862 */ 2863 for (i = 0; i < mac_count; i++) { 2864 struct get_list_macaddr *mac_entry; 2865 u16 mac_addr_size; 2866 u32 mac_id; 2867 2868 mac_entry = &resp->macaddr_list[i]; 2869 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 2870 /* mac_id is a 32 bit value and mac_addr size 2871 * is 6 bytes 2872 */ 2873 if (mac_addr_size == sizeof(u32)) { 2874 *pmac_id_valid = true; 2875 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 2876 *pmac_id = le32_to_cpu(mac_id); 2877 goto out; 2878 } 2879 } 2880 /* If no active mac_id found, return first mac addr */ 2881 *pmac_id_valid = false; 2882 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2883 ETH_ALEN); 2884 } 2885 2886 out: 2887 spin_unlock_bh(&adapter->mcc_lock); 2888 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 2889 get_mac_list_cmd.va, get_mac_list_cmd.dma); 2890 return status; 2891 } 2892 2893 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 2894 u8 *mac, u32 if_handle, bool active, u32 domain) 2895 { 2896 2897 if (!active) 2898 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 2899 if_handle, domain); 2900 if (BEx_chip(adapter)) 2901 return be_cmd_mac_addr_query(adapter, mac, false, 2902 if_handle, curr_pmac_id); 2903 else 2904 /* Fetch the MAC address using pmac_id */ 2905 return be_cmd_get_mac_from_list(adapter, mac, &active, 2906 &curr_pmac_id, 2907 if_handle, domain); 2908 } 2909 2910 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 2911 { 2912 int status; 2913 bool pmac_valid = false; 2914 2915 memset(mac, 0, ETH_ALEN); 2916 2917 if (BEx_chip(adapter)) { 2918 if (be_physfn(adapter)) 2919 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 2920 0); 2921 else 2922 status = be_cmd_mac_addr_query(adapter, mac, false, 2923 adapter->if_handle, 0); 2924 } else { 2925 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 2926 NULL, adapter->if_handle, 0); 2927 } 2928 2929 return status; 2930 } 2931 2932 /* Uses synchronous MCCQ */ 2933 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 2934 u8 mac_count, u32 domain) 2935 { 2936 struct be_mcc_wrb *wrb; 2937 struct be_cmd_req_set_mac_list *req; 2938 int status; 2939 struct be_dma_mem cmd; 2940 2941 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2942 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2943 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2944 &cmd.dma, GFP_KERNEL); 2945 if (!cmd.va) 2946 return -ENOMEM; 2947 2948 spin_lock_bh(&adapter->mcc_lock); 2949 2950 wrb = wrb_from_mccq(adapter); 2951 if (!wrb) { 2952 status = -EBUSY; 2953 goto err; 2954 } 2955 2956 req = cmd.va; 2957 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2958 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2959 wrb, &cmd); 2960 2961 req->hdr.domain = domain; 2962 req->mac_count = mac_count; 2963 if (mac_count) 2964 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 2965 2966 status = be_mcc_notify_wait(adapter); 2967 2968 err: 2969 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2970 spin_unlock_bh(&adapter->mcc_lock); 2971 return status; 2972 } 2973 2974 /* Wrapper to delete any active MACs and provision the new mac. 2975 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 2976 * current list are active. 2977 */ 2978 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 2979 { 2980 bool active_mac = false; 2981 u8 old_mac[ETH_ALEN]; 2982 u32 pmac_id; 2983 int status; 2984 2985 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 2986 &pmac_id, if_id, dom); 2987 2988 if (!status && active_mac) 2989 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 2990 2991 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 2992 } 2993 2994 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 2995 u32 domain, u16 intf_id, u16 hsw_mode) 2996 { 2997 struct be_mcc_wrb *wrb; 2998 struct be_cmd_req_set_hsw_config *req; 2999 void *ctxt; 3000 int status; 3001 3002 spin_lock_bh(&adapter->mcc_lock); 3003 3004 wrb = wrb_from_mccq(adapter); 3005 if (!wrb) { 3006 status = -EBUSY; 3007 goto err; 3008 } 3009 3010 req = embedded_payload(wrb); 3011 ctxt = &req->context; 3012 3013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3014 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3015 NULL); 3016 3017 req->hdr.domain = domain; 3018 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3019 if (pvid) { 3020 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3021 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3022 } 3023 if (!BEx_chip(adapter) && hsw_mode) { 3024 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3025 ctxt, adapter->hba_port_num); 3026 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3027 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3028 ctxt, hsw_mode); 3029 } 3030 3031 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3032 status = be_mcc_notify_wait(adapter); 3033 3034 err: 3035 spin_unlock_bh(&adapter->mcc_lock); 3036 return status; 3037 } 3038 3039 /* Get Hyper switch config */ 3040 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3041 u32 domain, u16 intf_id, u8 *mode) 3042 { 3043 struct be_mcc_wrb *wrb; 3044 struct be_cmd_req_get_hsw_config *req; 3045 void *ctxt; 3046 int status; 3047 u16 vid; 3048 3049 spin_lock_bh(&adapter->mcc_lock); 3050 3051 wrb = wrb_from_mccq(adapter); 3052 if (!wrb) { 3053 status = -EBUSY; 3054 goto err; 3055 } 3056 3057 req = embedded_payload(wrb); 3058 ctxt = &req->context; 3059 3060 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3061 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3062 NULL); 3063 3064 req->hdr.domain = domain; 3065 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3066 ctxt, intf_id); 3067 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3068 3069 if (!BEx_chip(adapter) && mode) { 3070 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3071 ctxt, adapter->hba_port_num); 3072 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3073 } 3074 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3075 3076 status = be_mcc_notify_wait(adapter); 3077 if (!status) { 3078 struct be_cmd_resp_get_hsw_config *resp = 3079 embedded_payload(wrb); 3080 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3081 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3082 pvid, &resp->context); 3083 if (pvid) 3084 *pvid = le16_to_cpu(vid); 3085 if (mode) 3086 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3087 port_fwd_type, &resp->context); 3088 } 3089 3090 err: 3091 spin_unlock_bh(&adapter->mcc_lock); 3092 return status; 3093 } 3094 3095 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3096 { 3097 struct be_mcc_wrb *wrb; 3098 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 3099 int status = 0; 3100 struct be_dma_mem cmd; 3101 3102 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3103 CMD_SUBSYSTEM_ETH)) 3104 return -EPERM; 3105 3106 if (be_is_wol_excluded(adapter)) 3107 return status; 3108 3109 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3110 return -1; 3111 3112 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3113 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3114 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3115 if (!cmd.va) { 3116 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3117 status = -ENOMEM; 3118 goto err; 3119 } 3120 3121 wrb = wrb_from_mbox(adapter); 3122 if (!wrb) { 3123 status = -EBUSY; 3124 goto err; 3125 } 3126 3127 req = cmd.va; 3128 3129 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3130 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3131 sizeof(*req), wrb, &cmd); 3132 3133 req->hdr.version = 1; 3134 req->query_options = BE_GET_WOL_CAP; 3135 3136 status = be_mbox_notify_wait(adapter); 3137 if (!status) { 3138 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 3139 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; 3140 3141 adapter->wol_cap = resp->wol_settings; 3142 if (adapter->wol_cap & BE_WOL_CAP) 3143 adapter->wol_en = true; 3144 } 3145 err: 3146 mutex_unlock(&adapter->mbox_lock); 3147 if (cmd.va) 3148 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3149 return status; 3150 3151 } 3152 3153 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 3154 { 3155 struct be_dma_mem extfat_cmd; 3156 struct be_fat_conf_params *cfgs; 3157 int status; 3158 int i, j; 3159 3160 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3161 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3162 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3163 &extfat_cmd.dma); 3164 if (!extfat_cmd.va) 3165 return -ENOMEM; 3166 3167 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3168 if (status) 3169 goto err; 3170 3171 cfgs = (struct be_fat_conf_params *) 3172 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 3173 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 3174 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 3175 for (j = 0; j < num_modes; j++) { 3176 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 3177 cfgs->module[i].trace_lvl[j].dbg_lvl = 3178 cpu_to_le32(level); 3179 } 3180 } 3181 3182 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3183 err: 3184 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3185 extfat_cmd.dma); 3186 return status; 3187 } 3188 3189 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 3190 { 3191 struct be_dma_mem extfat_cmd; 3192 struct be_fat_conf_params *cfgs; 3193 int status, j; 3194 int level = 0; 3195 3196 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3197 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3198 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3199 &extfat_cmd.dma); 3200 3201 if (!extfat_cmd.va) { 3202 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3203 __func__); 3204 goto err; 3205 } 3206 3207 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3208 if (!status) { 3209 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 3210 sizeof(struct be_cmd_resp_hdr)); 3211 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 3212 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 3213 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3214 } 3215 } 3216 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3217 extfat_cmd.dma); 3218 err: 3219 return level; 3220 } 3221 3222 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 3223 struct be_dma_mem *cmd) 3224 { 3225 struct be_mcc_wrb *wrb; 3226 struct be_cmd_req_get_ext_fat_caps *req; 3227 int status; 3228 3229 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3230 return -1; 3231 3232 wrb = wrb_from_mbox(adapter); 3233 if (!wrb) { 3234 status = -EBUSY; 3235 goto err; 3236 } 3237 3238 req = cmd->va; 3239 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3240 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 3241 cmd->size, wrb, cmd); 3242 req->parameter_type = cpu_to_le32(1); 3243 3244 status = be_mbox_notify_wait(adapter); 3245 err: 3246 mutex_unlock(&adapter->mbox_lock); 3247 return status; 3248 } 3249 3250 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 3251 struct be_dma_mem *cmd, 3252 struct be_fat_conf_params *configs) 3253 { 3254 struct be_mcc_wrb *wrb; 3255 struct be_cmd_req_set_ext_fat_caps *req; 3256 int status; 3257 3258 spin_lock_bh(&adapter->mcc_lock); 3259 3260 wrb = wrb_from_mccq(adapter); 3261 if (!wrb) { 3262 status = -EBUSY; 3263 goto err; 3264 } 3265 3266 req = cmd->va; 3267 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 3268 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3269 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 3270 cmd->size, wrb, cmd); 3271 3272 status = be_mcc_notify_wait(adapter); 3273 err: 3274 spin_unlock_bh(&adapter->mcc_lock); 3275 return status; 3276 } 3277 3278 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name) 3279 { 3280 struct be_mcc_wrb *wrb; 3281 struct be_cmd_req_get_port_name *req; 3282 int status; 3283 3284 if (!lancer_chip(adapter)) { 3285 *port_name = adapter->hba_port_num + '0'; 3286 return 0; 3287 } 3288 3289 spin_lock_bh(&adapter->mcc_lock); 3290 3291 wrb = wrb_from_mccq(adapter); 3292 if (!wrb) { 3293 status = -EBUSY; 3294 goto err; 3295 } 3296 3297 req = embedded_payload(wrb); 3298 3299 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3300 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3301 NULL); 3302 req->hdr.version = 1; 3303 3304 status = be_mcc_notify_wait(adapter); 3305 if (!status) { 3306 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3307 *port_name = resp->port_name[adapter->hba_port_num]; 3308 } else { 3309 *port_name = adapter->hba_port_num + '0'; 3310 } 3311 err: 3312 spin_unlock_bh(&adapter->mcc_lock); 3313 return status; 3314 } 3315 3316 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count) 3317 { 3318 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3319 int i; 3320 3321 for (i = 0; i < desc_count; i++) { 3322 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3323 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) 3324 return (struct be_nic_res_desc *)hdr; 3325 3326 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3327 hdr = (void *)hdr + hdr->desc_len; 3328 } 3329 return NULL; 3330 } 3331 3332 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, 3333 u32 desc_count) 3334 { 3335 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3336 struct be_pcie_res_desc *pcie; 3337 int i; 3338 3339 for (i = 0; i < desc_count; i++) { 3340 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 3341 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { 3342 pcie = (struct be_pcie_res_desc *)hdr; 3343 if (pcie->pf_num == devfn) 3344 return pcie; 3345 } 3346 3347 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3348 hdr = (void *)hdr + hdr->desc_len; 3349 } 3350 return NULL; 3351 } 3352 3353 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 3354 { 3355 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3356 int i; 3357 3358 for (i = 0; i < desc_count; i++) { 3359 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 3360 return (struct be_port_res_desc *)hdr; 3361 3362 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3363 hdr = (void *)hdr + hdr->desc_len; 3364 } 3365 return NULL; 3366 } 3367 3368 static void be_copy_nic_desc(struct be_resources *res, 3369 struct be_nic_res_desc *desc) 3370 { 3371 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 3372 res->max_vlans = le16_to_cpu(desc->vlan_count); 3373 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 3374 res->max_tx_qs = le16_to_cpu(desc->txq_count); 3375 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 3376 res->max_rx_qs = le16_to_cpu(desc->rq_count); 3377 res->max_evt_qs = le16_to_cpu(desc->eq_count); 3378 /* Clear flags that driver is not interested in */ 3379 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 3380 BE_IF_CAP_FLAGS_WANT; 3381 /* Need 1 RXQ as the default RXQ */ 3382 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs) 3383 res->max_rss_qs -= 1; 3384 } 3385 3386 /* Uses Mbox */ 3387 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 3388 { 3389 struct be_mcc_wrb *wrb; 3390 struct be_cmd_req_get_func_config *req; 3391 int status; 3392 struct be_dma_mem cmd; 3393 3394 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3395 return -1; 3396 3397 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3398 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3399 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3400 if (!cmd.va) { 3401 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3402 status = -ENOMEM; 3403 goto err; 3404 } 3405 3406 wrb = wrb_from_mbox(adapter); 3407 if (!wrb) { 3408 status = -EBUSY; 3409 goto err; 3410 } 3411 3412 req = cmd.va; 3413 3414 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3415 OPCODE_COMMON_GET_FUNC_CONFIG, 3416 cmd.size, wrb, &cmd); 3417 3418 if (skyhawk_chip(adapter)) 3419 req->hdr.version = 1; 3420 3421 status = be_mbox_notify_wait(adapter); 3422 if (!status) { 3423 struct be_cmd_resp_get_func_config *resp = cmd.va; 3424 u32 desc_count = le32_to_cpu(resp->desc_count); 3425 struct be_nic_res_desc *desc; 3426 3427 desc = be_get_nic_desc(resp->func_param, desc_count); 3428 if (!desc) { 3429 status = -EINVAL; 3430 goto err; 3431 } 3432 3433 adapter->pf_number = desc->pf_num; 3434 be_copy_nic_desc(res, desc); 3435 } 3436 err: 3437 mutex_unlock(&adapter->mbox_lock); 3438 if (cmd.va) 3439 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3440 return status; 3441 } 3442 3443 /* Uses mbox */ 3444 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, 3445 u8 domain, struct be_dma_mem *cmd) 3446 { 3447 struct be_mcc_wrb *wrb; 3448 struct be_cmd_req_get_profile_config *req; 3449 int status; 3450 3451 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3452 return -1; 3453 wrb = wrb_from_mbox(adapter); 3454 3455 req = cmd->va; 3456 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3457 OPCODE_COMMON_GET_PROFILE_CONFIG, 3458 cmd->size, wrb, cmd); 3459 3460 req->type = ACTIVE_PROFILE_TYPE; 3461 req->hdr.domain = domain; 3462 if (!lancer_chip(adapter)) 3463 req->hdr.version = 1; 3464 3465 status = be_mbox_notify_wait(adapter); 3466 3467 mutex_unlock(&adapter->mbox_lock); 3468 return status; 3469 } 3470 3471 /* Uses sync mcc */ 3472 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, 3473 u8 domain, struct be_dma_mem *cmd) 3474 { 3475 struct be_mcc_wrb *wrb; 3476 struct be_cmd_req_get_profile_config *req; 3477 int status; 3478 3479 spin_lock_bh(&adapter->mcc_lock); 3480 3481 wrb = wrb_from_mccq(adapter); 3482 if (!wrb) { 3483 status = -EBUSY; 3484 goto err; 3485 } 3486 3487 req = cmd->va; 3488 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3489 OPCODE_COMMON_GET_PROFILE_CONFIG, 3490 cmd->size, wrb, cmd); 3491 3492 req->type = ACTIVE_PROFILE_TYPE; 3493 req->hdr.domain = domain; 3494 if (!lancer_chip(adapter)) 3495 req->hdr.version = 1; 3496 3497 status = be_mcc_notify_wait(adapter); 3498 3499 err: 3500 spin_unlock_bh(&adapter->mcc_lock); 3501 return status; 3502 } 3503 3504 /* Uses sync mcc, if MCCQ is already created otherwise mbox */ 3505 int be_cmd_get_profile_config(struct be_adapter *adapter, 3506 struct be_resources *res, u8 domain) 3507 { 3508 struct be_cmd_resp_get_profile_config *resp; 3509 struct be_pcie_res_desc *pcie; 3510 struct be_port_res_desc *port; 3511 struct be_nic_res_desc *nic; 3512 struct be_queue_info *mccq = &adapter->mcc_obj.q; 3513 struct be_dma_mem cmd; 3514 u32 desc_count; 3515 int status; 3516 3517 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3518 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3519 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3520 if (!cmd.va) 3521 return -ENOMEM; 3522 3523 if (!mccq->created) 3524 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd); 3525 else 3526 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd); 3527 if (status) 3528 goto err; 3529 3530 resp = cmd.va; 3531 desc_count = le32_to_cpu(resp->desc_count); 3532 3533 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3534 desc_count); 3535 if (pcie) 3536 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3537 3538 port = be_get_port_desc(resp->func_param, desc_count); 3539 if (port) 3540 adapter->mc_type = port->mc_type; 3541 3542 nic = be_get_nic_desc(resp->func_param, desc_count); 3543 if (nic) 3544 be_copy_nic_desc(res, nic); 3545 3546 err: 3547 if (cmd.va) 3548 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3549 return status; 3550 } 3551 3552 int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 3553 int size, u8 version, u8 domain) 3554 { 3555 struct be_cmd_req_set_profile_config *req; 3556 struct be_mcc_wrb *wrb; 3557 int status; 3558 3559 spin_lock_bh(&adapter->mcc_lock); 3560 3561 wrb = wrb_from_mccq(adapter); 3562 if (!wrb) { 3563 status = -EBUSY; 3564 goto err; 3565 } 3566 3567 req = embedded_payload(wrb); 3568 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3569 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req), 3570 wrb, NULL); 3571 req->hdr.version = version; 3572 req->hdr.domain = domain; 3573 req->desc_count = cpu_to_le32(1); 3574 memcpy(req->desc, desc, size); 3575 3576 status = be_mcc_notify_wait(adapter); 3577 err: 3578 spin_unlock_bh(&adapter->mcc_lock); 3579 return status; 3580 } 3581 3582 /* Mark all fields invalid */ 3583 void be_reset_nic_desc(struct be_nic_res_desc *nic) 3584 { 3585 memset(nic, 0, sizeof(*nic)); 3586 nic->unicast_mac_count = 0xFFFF; 3587 nic->mcc_count = 0xFFFF; 3588 nic->vlan_count = 0xFFFF; 3589 nic->mcast_mac_count = 0xFFFF; 3590 nic->txq_count = 0xFFFF; 3591 nic->rq_count = 0xFFFF; 3592 nic->rssq_count = 0xFFFF; 3593 nic->lro_count = 0xFFFF; 3594 nic->cq_count = 0xFFFF; 3595 nic->toe_conn_count = 0xFFFF; 3596 nic->eq_count = 0xFFFF; 3597 nic->iface_count = 0xFFFF; 3598 nic->link_param = 0xFF; 3599 nic->channel_id_param = cpu_to_le16(0xF000); 3600 nic->acpi_params = 0xFF; 3601 nic->wol_param = 0x0F; 3602 nic->tunnel_iface_count = 0xFFFF; 3603 nic->direct_tenant_iface_count = 0xFFFF; 3604 nic->bw_max = 0xFFFFFFFF; 3605 } 3606 3607 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 3608 u8 domain) 3609 { 3610 struct be_nic_res_desc nic_desc; 3611 u32 bw_percent; 3612 u16 version = 0; 3613 3614 if (BE3_chip(adapter)) 3615 return be_cmd_set_qos(adapter, max_rate / 10, domain); 3616 3617 be_reset_nic_desc(&nic_desc); 3618 nic_desc.pf_num = adapter->pf_number; 3619 nic_desc.vf_num = domain; 3620 if (lancer_chip(adapter)) { 3621 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3622 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3623 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3624 (1 << NOSV_SHIFT); 3625 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 3626 } else { 3627 version = 1; 3628 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3629 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3630 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3631 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 3632 nic_desc.bw_max = cpu_to_le32(bw_percent); 3633 } 3634 3635 return be_cmd_set_profile_config(adapter, &nic_desc, 3636 nic_desc.hdr.desc_len, 3637 version, domain); 3638 } 3639 3640 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3641 { 3642 struct be_mcc_wrb *wrb; 3643 struct be_cmd_req_manage_iface_filters *req; 3644 int status; 3645 3646 if (iface == 0xFFFFFFFF) 3647 return -1; 3648 3649 spin_lock_bh(&adapter->mcc_lock); 3650 3651 wrb = wrb_from_mccq(adapter); 3652 if (!wrb) { 3653 status = -EBUSY; 3654 goto err; 3655 } 3656 req = embedded_payload(wrb); 3657 3658 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3659 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 3660 wrb, NULL); 3661 req->op = op; 3662 req->target_iface_id = cpu_to_le32(iface); 3663 3664 status = be_mcc_notify_wait(adapter); 3665 err: 3666 spin_unlock_bh(&adapter->mcc_lock); 3667 return status; 3668 } 3669 3670 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 3671 { 3672 struct be_port_res_desc port_desc; 3673 3674 memset(&port_desc, 0, sizeof(port_desc)); 3675 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 3676 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3677 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3678 port_desc.link_num = adapter->hba_port_num; 3679 if (port) { 3680 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 3681 (1 << RCVID_SHIFT); 3682 port_desc.nv_port = swab16(port); 3683 } else { 3684 port_desc.nv_flags = NV_TYPE_DISABLED; 3685 port_desc.nv_port = 0; 3686 } 3687 3688 return be_cmd_set_profile_config(adapter, &port_desc, 3689 RESOURCE_DESC_SIZE_V1, 1, 0); 3690 } 3691 3692 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 3693 int vf_num) 3694 { 3695 struct be_mcc_wrb *wrb; 3696 struct be_cmd_req_get_iface_list *req; 3697 struct be_cmd_resp_get_iface_list *resp; 3698 int status; 3699 3700 spin_lock_bh(&adapter->mcc_lock); 3701 3702 wrb = wrb_from_mccq(adapter); 3703 if (!wrb) { 3704 status = -EBUSY; 3705 goto err; 3706 } 3707 req = embedded_payload(wrb); 3708 3709 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3710 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 3711 wrb, NULL); 3712 req->hdr.domain = vf_num + 1; 3713 3714 status = be_mcc_notify_wait(adapter); 3715 if (!status) { 3716 resp = (struct be_cmd_resp_get_iface_list *)req; 3717 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 3718 } 3719 3720 err: 3721 spin_unlock_bh(&adapter->mcc_lock); 3722 return status; 3723 } 3724 3725 static int lancer_wait_idle(struct be_adapter *adapter) 3726 { 3727 #define SLIPORT_IDLE_TIMEOUT 30 3728 u32 reg_val; 3729 int status = 0, i; 3730 3731 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 3732 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 3733 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 3734 break; 3735 3736 ssleep(1); 3737 } 3738 3739 if (i == SLIPORT_IDLE_TIMEOUT) 3740 status = -1; 3741 3742 return status; 3743 } 3744 3745 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 3746 { 3747 int status = 0; 3748 3749 status = lancer_wait_idle(adapter); 3750 if (status) 3751 return status; 3752 3753 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 3754 3755 return status; 3756 } 3757 3758 /* Routine to check whether dump image is present or not */ 3759 bool dump_present(struct be_adapter *adapter) 3760 { 3761 u32 sliport_status = 0; 3762 3763 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 3764 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 3765 } 3766 3767 int lancer_initiate_dump(struct be_adapter *adapter) 3768 { 3769 int status; 3770 3771 /* give firmware reset and diagnostic dump */ 3772 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 3773 PHYSDEV_CONTROL_DD_MASK); 3774 if (status < 0) { 3775 dev_err(&adapter->pdev->dev, "Firmware reset failed\n"); 3776 return status; 3777 } 3778 3779 status = lancer_wait_idle(adapter); 3780 if (status) 3781 return status; 3782 3783 if (!dump_present(adapter)) { 3784 dev_err(&adapter->pdev->dev, "Dump image not present\n"); 3785 return -1; 3786 } 3787 3788 return 0; 3789 } 3790 3791 /* Uses sync mcc */ 3792 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 3793 { 3794 struct be_mcc_wrb *wrb; 3795 struct be_cmd_enable_disable_vf *req; 3796 int status; 3797 3798 if (BEx_chip(adapter)) 3799 return 0; 3800 3801 spin_lock_bh(&adapter->mcc_lock); 3802 3803 wrb = wrb_from_mccq(adapter); 3804 if (!wrb) { 3805 status = -EBUSY; 3806 goto err; 3807 } 3808 3809 req = embedded_payload(wrb); 3810 3811 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3812 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 3813 wrb, NULL); 3814 3815 req->hdr.domain = domain; 3816 req->enable = 1; 3817 status = be_mcc_notify_wait(adapter); 3818 err: 3819 spin_unlock_bh(&adapter->mcc_lock); 3820 return status; 3821 } 3822 3823 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 3824 { 3825 struct be_mcc_wrb *wrb; 3826 struct be_cmd_req_intr_set *req; 3827 int status; 3828 3829 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3830 return -1; 3831 3832 wrb = wrb_from_mbox(adapter); 3833 3834 req = embedded_payload(wrb); 3835 3836 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3837 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 3838 wrb, NULL); 3839 3840 req->intr_enabled = intr_enable; 3841 3842 status = be_mbox_notify_wait(adapter); 3843 3844 mutex_unlock(&adapter->mbox_lock); 3845 return status; 3846 } 3847 3848 /* Uses MBOX */ 3849 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 3850 { 3851 struct be_cmd_req_get_active_profile *req; 3852 struct be_mcc_wrb *wrb; 3853 int status; 3854 3855 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3856 return -1; 3857 3858 wrb = wrb_from_mbox(adapter); 3859 if (!wrb) { 3860 status = -EBUSY; 3861 goto err; 3862 } 3863 3864 req = embedded_payload(wrb); 3865 3866 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3867 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 3868 wrb, NULL); 3869 3870 status = be_mbox_notify_wait(adapter); 3871 if (!status) { 3872 struct be_cmd_resp_get_active_profile *resp = 3873 embedded_payload(wrb); 3874 *profile_id = le16_to_cpu(resp->active_profile_id); 3875 } 3876 3877 err: 3878 mutex_unlock(&adapter->mbox_lock); 3879 return status; 3880 } 3881 3882 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 3883 int link_state, u8 domain) 3884 { 3885 struct be_mcc_wrb *wrb; 3886 struct be_cmd_req_set_ll_link *req; 3887 int status; 3888 3889 if (BEx_chip(adapter) || lancer_chip(adapter)) 3890 return 0; 3891 3892 spin_lock_bh(&adapter->mcc_lock); 3893 3894 wrb = wrb_from_mccq(adapter); 3895 if (!wrb) { 3896 status = -EBUSY; 3897 goto err; 3898 } 3899 3900 req = embedded_payload(wrb); 3901 3902 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3903 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 3904 sizeof(*req), wrb, NULL); 3905 3906 req->hdr.version = 1; 3907 req->hdr.domain = domain; 3908 3909 if (link_state == IFLA_VF_LINK_STATE_ENABLE) 3910 req->link_config |= 1; 3911 3912 if (link_state == IFLA_VF_LINK_STATE_AUTO) 3913 req->link_config |= 1 << PLINK_TRACK_SHIFT; 3914 3915 status = be_mcc_notify_wait(adapter); 3916 err: 3917 spin_unlock_bh(&adapter->mcc_lock); 3918 return status; 3919 } 3920 3921 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3922 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3923 { 3924 struct be_adapter *adapter = netdev_priv(netdev_handle); 3925 struct be_mcc_wrb *wrb; 3926 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; 3927 struct be_cmd_req_hdr *req; 3928 struct be_cmd_resp_hdr *resp; 3929 int status; 3930 3931 spin_lock_bh(&adapter->mcc_lock); 3932 3933 wrb = wrb_from_mccq(adapter); 3934 if (!wrb) { 3935 status = -EBUSY; 3936 goto err; 3937 } 3938 req = embedded_payload(wrb); 3939 resp = embedded_payload(wrb); 3940 3941 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 3942 hdr->opcode, wrb_payload_size, wrb, NULL); 3943 memcpy(req, wrb_payload, wrb_payload_size); 3944 be_dws_cpu_to_le(req, wrb_payload_size); 3945 3946 status = be_mcc_notify_wait(adapter); 3947 if (cmd_status) 3948 *cmd_status = (status & 0xffff); 3949 if (ext_status) 3950 *ext_status = 0; 3951 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 3952 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 3953 err: 3954 spin_unlock_bh(&adapter->mcc_lock); 3955 return status; 3956 } 3957 EXPORT_SYMBOL(be_roce_mcc_cmd); 3958