/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}
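/* Example of the tag round-trip handled by be_decode_resp_hdr() above
 * (illustrative values, assuming a 64-bit kernel): fill_wrb_tags(),
 * further below, stores a request's virtual address as
 * tag0 = addr & 0xFFFFFFFF and tag1 = upper_32_bits(addr), e.g.
 *
 *	addr 0xffff880012345678 -> tag0 0x12345678, tag1 0xffff8800
 *	((tag1 << 16) << 16) | tag0 -> 0xffff880012345678
 *
 * The double 16-bit shift keeps the expression well-defined when
 * "unsigned long" is only 32 bits wide, where a single shift by 32 would
 * be undefined behaviour.
 */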
static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Placeholder for all the async MCC cmds for which the caller is not in a
 * busy loop (i.e. has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}
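/* Note on the status word used above: compl->status packs the base
 * completion status in its low bits and the additional status above it;
 * the base_status()/addl_status() helpers (be_cmds.h) extract the two
 * fields, and be_mcc_notify_wait() below rebuilds the same combined value
 * from the response header via CQE_ADDL_STATUS_SHIFT.
 */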
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}
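/* Async-event dispatch in a nutshell: the event code embedded in
 * compl->flags (tested by the is_*_evt() helpers above) selects the
 * top-level handler, and for GRP_5 and debug events the event type field
 * then selects the sub-handler.
 */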
static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}
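/* Timing note: be_mcc_wait_compl() polls in 100us steps, so the
 * 120000-iteration budget is the advertised 12s timeout. Bottom halves
 * are disabled around be_process_mcc() because completions are normally
 * reaped from softirq context and mcc_cq_lock is taken without disabling
 * bottom halves inside be_process_mcc() itself.
 */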
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
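/* Doorbell format used above: the MPU_MAILBOX_DB_HI_MASK flag selects
 * which half of the mailbox DMA address bits 2-31 of the register carry:
 * physical address bits 34-63 when the flag is set, bits 4-33 when it is
 * clear. Address bits 0-3 are never transmitted, so the mailbox memory
 * must be at least 16-byte aligned.
 */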
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and goes away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
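/* Readiness summary: Lancer reports readiness through SLIPORT_STATUS
 * (lancer_wait_ready()); the other chips go through POST, whose stage is
 * read from the CSR BAR on BE-x and from PCI config space on Skyhawk
 * (be_POST_stage_get()). be_fw_wait_ready() picks the right scheme.
 */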
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}
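/* Sketch of the calling convention for the helpers above, modelled on
 * be_cmd_txq_create() further down ("be_cmd_req_xyz" is a placeholder
 * name, not a real request struct):
 *
 *	struct be_mcc_wrb wrb = {0};
 *	struct be_cmd_req_xyz *req = embedded_payload(&wrb);
 *
 *	be_wrb_cmd_hdr_prepare(&req->hdr, subsystem, opcode,
 *			       sizeof(*req), &wrb, NULL);
 *	...fill request fields...
 *	status = be_cmd_notify_wait(adapter, &wrb);
 *
 * be_cmd_notify_wait() copies the on-stack WRB into the MCCQ (or into
 * the mailbox while the MCCQ doesn't exist yet) and, on success, copies
 * the response back into the caller's WRB.
 */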
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATE v2 is available only from SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
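/* Two queue-size encodings are in use here: EQs and CQs store
 * log2(len / 256) in their "count" context field (256 entries -> 0,
 * 1024 entries -> 2), while MCC and TX queues use be_encoded_q_len()
 * above, i.e. log2(len) + 1 with the value 16 wrapping to 0.
 */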
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev,
			 "Upgrade to F/W ver 2.102.235.0 or newer to avoid conflicting priorities between NIC and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
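/* Note on be_cmd_rxq_create() above: frag_size is programmed as
 * fls(frag_size) - 1, i.e. its log2, so the fragment size handed in must
 * be a power of two.
 */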
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && !be_physfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1,
	 * everything newer uses v2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* v1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
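/* Speed reporting note for the query above: when the FW fills in
 * link_speed it is in 10 Mbps units (hence the multiply by 10); a zero
 * link_speed falls back to the PHY_LINK_SPEED_* code in mac_speed, which
 * be_mac_to_link_speed() maps to Mbps.
 */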
/* Uses asynchronous mcc; the completion is handled in
 * be_async_cmd_process()
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
	    log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev,
				"FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}
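/* FAT retrieval note: the log is pulled in 60KB chunks to bound the DMA
 * buffer size. be_cmd_get_reg_len() subtracts sizeof(u32) from the
 * reported size and the read loop starts at log_offset = sizeof(u32),
 * which suggests the first u32 of the log area is metadata rather than
 * log data.
 */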
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strcpy(adapter->fw_ver, resp->firmware_version_string);
		strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to a specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);

		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
	    req->if_flags_mask) {
		dev_warn(&adapter->pdev->dev,
			 "Cannot set rx filter flags 0x%x\n",
			 req->if_flags_mask);
		dev_warn(&adapter->pdev->dev,
			 "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2089 2090 if (!BEx_chip(adapter)) 2091 req->hdr.version = 1; 2092 2093 memcpy(req->cpu_table, rsstable, table_size); 2094 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); 2095 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2096 2097 status = be_mcc_notify_wait(adapter); 2098 err: 2099 spin_unlock_bh(&adapter->mcc_lock); 2100 return status; 2101 } 2102 2103 /* Uses sync mcc */ 2104 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2105 u8 bcn, u8 sts, u8 state) 2106 { 2107 struct be_mcc_wrb *wrb; 2108 struct be_cmd_req_enable_disable_beacon *req; 2109 int status; 2110 2111 spin_lock_bh(&adapter->mcc_lock); 2112 2113 wrb = wrb_from_mccq(adapter); 2114 if (!wrb) { 2115 status = -EBUSY; 2116 goto err; 2117 } 2118 req = embedded_payload(wrb); 2119 2120 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2121 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2122 sizeof(*req), wrb, NULL); 2123 2124 req->port_num = port_num; 2125 req->beacon_state = state; 2126 req->beacon_duration = bcn; 2127 req->status_duration = sts; 2128 2129 status = be_mcc_notify_wait(adapter); 2130 2131 err: 2132 spin_unlock_bh(&adapter->mcc_lock); 2133 return status; 2134 } 2135 2136 /* Uses sync mcc */ 2137 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2138 { 2139 struct be_mcc_wrb *wrb; 2140 struct be_cmd_req_get_beacon_state *req; 2141 int status; 2142 2143 spin_lock_bh(&adapter->mcc_lock); 2144 2145 wrb = wrb_from_mccq(adapter); 2146 if (!wrb) { 2147 status = -EBUSY; 2148 goto err; 2149 } 2150 req = embedded_payload(wrb); 2151 2152 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2153 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2154 wrb, NULL); 2155 2156 req->port_num = port_num; 2157 2158 status = be_mcc_notify_wait(adapter); 2159 if (!status) { 2160 struct be_cmd_resp_get_beacon_state *resp = 2161 embedded_payload(wrb); 2162 *state = resp->beacon_state; 2163 } 2164 2165 err: 2166 spin_unlock_bh(&adapter->mcc_lock); 2167 return status; 2168 } 2169 2170 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2171 u32 data_size, u32 data_offset, 2172 const char *obj_name, u32 *data_written, 2173 u8 *change_status, u8 *addn_status) 2174 { 2175 struct be_mcc_wrb *wrb; 2176 struct lancer_cmd_req_write_object *req; 2177 struct lancer_cmd_resp_write_object *resp; 2178 void *ctxt = NULL; 2179 int status; 2180 2181 spin_lock_bh(&adapter->mcc_lock); 2182 adapter->flash_status = 0; 2183 2184 wrb = wrb_from_mccq(adapter); 2185 if (!wrb) { 2186 status = -EBUSY; 2187 goto err_unlock; 2188 } 2189 2190 req = embedded_payload(wrb); 2191 2192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2193 OPCODE_COMMON_WRITE_OBJECT, 2194 sizeof(struct lancer_cmd_req_write_object), wrb, 2195 NULL); 2196 2197 ctxt = &req->context; 2198 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2199 write_length, ctxt, data_size); 2200 2201 if (data_size == 0) 2202 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2203 eof, ctxt, 1); 2204 else 2205 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2206 eof, ctxt, 0); 2207 2208 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2209 req->write_offset = cpu_to_le32(data_offset); 2210 strcpy(req->object_name, obj_name); 2211 req->descriptor_count = cpu_to_le32(1); 2212 req->buf_len = cpu_to_le32(data_size); 2213 req->addr_low = cpu_to_le32((cmd->dma + 2214 sizeof(struct lancer_cmd_req_write_object)) 2215 & 0xFFFFFFFF); 2216 req->addr_high = 
cpu_to_le32(upper_32_bits(cmd->dma + 2217 sizeof(struct lancer_cmd_req_write_object))); 2218 2219 be_mcc_notify(adapter); 2220 spin_unlock_bh(&adapter->mcc_lock); 2221 2222 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2223 msecs_to_jiffies(60000))) 2224 status = -ETIMEDOUT; 2225 else 2226 status = adapter->flash_status; 2227 2228 resp = embedded_payload(wrb); 2229 if (!status) { 2230 *data_written = le32_to_cpu(resp->actual_write_len); 2231 *change_status = resp->change_status; 2232 } else { 2233 *addn_status = resp->additional_status; 2234 } 2235 2236 return status; 2237 2238 err_unlock: 2239 spin_unlock_bh(&adapter->mcc_lock); 2240 return status; 2241 } 2242 2243 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) 2244 { 2245 struct lancer_cmd_req_delete_object *req; 2246 struct be_mcc_wrb *wrb; 2247 int status; 2248 2249 spin_lock_bh(&adapter->mcc_lock); 2250 2251 wrb = wrb_from_mccq(adapter); 2252 if (!wrb) { 2253 status = -EBUSY; 2254 goto err; 2255 } 2256 2257 req = embedded_payload(wrb); 2258 2259 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2260 OPCODE_COMMON_DELETE_OBJECT, 2261 sizeof(*req), wrb, NULL); 2262 2263 strcpy(req->object_name, obj_name); 2264 2265 status = be_mcc_notify_wait(adapter); 2266 err: 2267 spin_unlock_bh(&adapter->mcc_lock); 2268 return status; 2269 } 2270 2271 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2272 u32 data_size, u32 data_offset, const char *obj_name, 2273 u32 *data_read, u32 *eof, u8 *addn_status) 2274 { 2275 struct be_mcc_wrb *wrb; 2276 struct lancer_cmd_req_read_object *req; 2277 struct lancer_cmd_resp_read_object *resp; 2278 int status; 2279 2280 spin_lock_bh(&adapter->mcc_lock); 2281 2282 wrb = wrb_from_mccq(adapter); 2283 if (!wrb) { 2284 status = -EBUSY; 2285 goto err_unlock; 2286 } 2287 2288 req = embedded_payload(wrb); 2289 2290 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2291 OPCODE_COMMON_READ_OBJECT, 2292 sizeof(struct lancer_cmd_req_read_object), wrb, 2293 NULL); 2294 2295 req->desired_read_len = cpu_to_le32(data_size); 2296 req->read_offset = cpu_to_le32(data_offset); 2297 strcpy(req->object_name, obj_name); 2298 req->descriptor_count = cpu_to_le32(1); 2299 req->buf_len = cpu_to_le32(data_size); 2300 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2301 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2302 2303 status = be_mcc_notify_wait(adapter); 2304 2305 resp = embedded_payload(wrb); 2306 if (!status) { 2307 *data_read = le32_to_cpu(resp->actual_read_len); 2308 *eof = le32_to_cpu(resp->eof); 2309 } else { 2310 *addn_status = resp->additional_status; 2311 } 2312 2313 err_unlock: 2314 spin_unlock_bh(&adapter->mcc_lock); 2315 return status; 2316 } 2317 2318 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2319 u32 flash_type, u32 flash_opcode, u32 buf_size) 2320 { 2321 struct be_mcc_wrb *wrb; 2322 struct be_cmd_write_flashrom *req; 2323 int status; 2324 2325 spin_lock_bh(&adapter->mcc_lock); 2326 adapter->flash_status = 0; 2327 2328 wrb = wrb_from_mccq(adapter); 2329 if (!wrb) { 2330 status = -EBUSY; 2331 goto err_unlock; 2332 } 2333 req = cmd->va; 2334 2335 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2336 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2337 cmd); 2338 2339 req->params.op_type = cpu_to_le32(flash_type); 2340 req->params.op_code = cpu_to_le32(flash_opcode); 2341 req->params.data_buf_size = cpu_to_le32(buf_size); 2342 2343 be_mcc_notify(adapter); 2344 
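/* The flash write is posted asynchronously: the MCC completion handler signals et_cmd_compl, which is waited on below with a 40s timeout instead of the usual be_mcc_notify_wait(). */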
spin_unlock_bh(&adapter->mcc_lock); 2345 2346 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2347 msecs_to_jiffies(40000))) 2348 status = -ETIMEDOUT; 2349 else 2350 status = adapter->flash_status; 2351 2352 return status; 2353 2354 err_unlock: 2355 spin_unlock_bh(&adapter->mcc_lock); 2356 return status; 2357 } 2358 2359 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2360 u16 optype, int offset) 2361 { 2362 struct be_mcc_wrb *wrb; 2363 struct be_cmd_read_flash_crc *req; 2364 int status; 2365 2366 spin_lock_bh(&adapter->mcc_lock); 2367 2368 wrb = wrb_from_mccq(adapter); 2369 if (!wrb) { 2370 status = -EBUSY; 2371 goto err; 2372 } 2373 req = embedded_payload(wrb); 2374 2375 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2376 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2377 wrb, NULL); 2378 2379 req->params.op_type = cpu_to_le32(optype); 2380 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2381 req->params.offset = cpu_to_le32(offset); 2382 req->params.data_buf_size = cpu_to_le32(0x4); 2383 2384 status = be_mcc_notify_wait(adapter); 2385 if (!status) 2386 memcpy(flashed_crc, req->crc, 4); 2387 2388 err: 2389 spin_unlock_bh(&adapter->mcc_lock); 2390 return status; 2391 } 2392 2393 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2394 struct be_dma_mem *nonemb_cmd) 2395 { 2396 struct be_mcc_wrb *wrb; 2397 struct be_cmd_req_acpi_wol_magic_config *req; 2398 int status; 2399 2400 spin_lock_bh(&adapter->mcc_lock); 2401 2402 wrb = wrb_from_mccq(adapter); 2403 if (!wrb) { 2404 status = -EBUSY; 2405 goto err; 2406 } 2407 req = nonemb_cmd->va; 2408 2409 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2410 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2411 wrb, nonemb_cmd); 2412 memcpy(req->magic_mac, mac, ETH_ALEN); 2413 2414 status = be_mcc_notify_wait(adapter); 2415 2416 err: 2417 spin_unlock_bh(&adapter->mcc_lock); 2418 return status; 2419 } 2420 2421 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2422 u8 loopback_type, u8 enable) 2423 { 2424 struct be_mcc_wrb *wrb; 2425 struct be_cmd_req_set_lmode *req; 2426 int status; 2427 2428 spin_lock_bh(&adapter->mcc_lock); 2429 2430 wrb = wrb_from_mccq(adapter); 2431 if (!wrb) { 2432 status = -EBUSY; 2433 goto err; 2434 } 2435 2436 req = embedded_payload(wrb); 2437 2438 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2439 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2440 wrb, NULL); 2441 2442 req->src_port = port_num; 2443 req->dest_port = port_num; 2444 req->loopback_type = loopback_type; 2445 req->loopback_state = enable; 2446 2447 status = be_mcc_notify_wait(adapter); 2448 err: 2449 spin_unlock_bh(&adapter->mcc_lock); 2450 return status; 2451 } 2452 2453 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2454 u32 loopback_type, u32 pkt_size, u32 num_pkts, 2455 u64 pattern) 2456 { 2457 struct be_mcc_wrb *wrb; 2458 struct be_cmd_req_loopback_test *req; 2459 struct be_cmd_resp_loopback_test *resp; 2460 int status; 2461 2462 spin_lock_bh(&adapter->mcc_lock); 2463 2464 wrb = wrb_from_mccq(adapter); 2465 if (!wrb) { 2466 status = -EBUSY; 2467 goto err; 2468 } 2469 2470 req = embedded_payload(wrb); 2471 2472 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2473 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2474 NULL); 2475 2476 req->hdr.timeout = cpu_to_le32(15); 2477 req->pattern = cpu_to_le64(pattern); 2478 req->src_port = cpu_to_le32(port_num); 2479 req->dest_port = cpu_to_le32(port_num); 2480 req->pkt_size = 
cpu_to_le32(pkt_size); 2481 req->num_pkts = cpu_to_le32(num_pkts); 2482 req->loopback_type = cpu_to_le32(loopback_type); 2483 2484 be_mcc_notify(adapter); 2485 2486 spin_unlock_bh(&adapter->mcc_lock); 2487 2488 wait_for_completion(&adapter->et_cmd_compl); 2489 resp = embedded_payload(wrb); 2490 status = le32_to_cpu(resp->status); 2491 2492 return status; 2493 err: 2494 spin_unlock_bh(&adapter->mcc_lock); 2495 return status; 2496 } 2497 2498 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2499 u32 byte_cnt, struct be_dma_mem *cmd) 2500 { 2501 struct be_mcc_wrb *wrb; 2502 struct be_cmd_req_ddrdma_test *req; 2503 int status; 2504 int i, j = 0; 2505 2506 spin_lock_bh(&adapter->mcc_lock); 2507 2508 wrb = wrb_from_mccq(adapter); 2509 if (!wrb) { 2510 status = -EBUSY; 2511 goto err; 2512 } 2513 req = cmd->va; 2514 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2515 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2516 cmd); 2517 2518 req->pattern = cpu_to_le64(pattern); 2519 req->byte_count = cpu_to_le32(byte_cnt); 2520 for (i = 0; i < byte_cnt; i++) { 2521 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2522 j++; 2523 if (j > 7) 2524 j = 0; 2525 } 2526 2527 status = be_mcc_notify_wait(adapter); 2528 2529 if (!status) { 2530 struct be_cmd_resp_ddrdma_test *resp; 2531 resp = cmd->va; 2532 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2533 resp->snd_err) { 2534 status = -1; 2535 } 2536 } 2537 2538 err: 2539 spin_unlock_bh(&adapter->mcc_lock); 2540 return status; 2541 } 2542 2543 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2544 struct be_dma_mem *nonemb_cmd) 2545 { 2546 struct be_mcc_wrb *wrb; 2547 struct be_cmd_req_seeprom_read *req; 2548 int status; 2549 2550 spin_lock_bh(&adapter->mcc_lock); 2551 2552 wrb = wrb_from_mccq(adapter); 2553 if (!wrb) { 2554 status = -EBUSY; 2555 goto err; 2556 } 2557 req = nonemb_cmd->va; 2558 2559 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2560 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2561 nonemb_cmd); 2562 2563 status = be_mcc_notify_wait(adapter); 2564 2565 err: 2566 spin_unlock_bh(&adapter->mcc_lock); 2567 return status; 2568 } 2569 2570 int be_cmd_get_phy_info(struct be_adapter *adapter) 2571 { 2572 struct be_mcc_wrb *wrb; 2573 struct be_cmd_req_get_phy_info *req; 2574 struct be_dma_mem cmd; 2575 int status; 2576 2577 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 2578 CMD_SUBSYSTEM_COMMON)) 2579 return -EPERM; 2580 2581 spin_lock_bh(&adapter->mcc_lock); 2582 2583 wrb = wrb_from_mccq(adapter); 2584 if (!wrb) { 2585 status = -EBUSY; 2586 goto err; 2587 } 2588 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2589 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2590 if (!cmd.va) { 2591 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2592 status = -ENOMEM; 2593 goto err; 2594 } 2595 2596 req = cmd.va; 2597 2598 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2599 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2600 wrb, &cmd); 2601 2602 status = be_mcc_notify_wait(adapter); 2603 if (!status) { 2604 struct be_phy_info *resp_phy_info = 2605 cmd.va + sizeof(struct be_cmd_req_hdr); 2606 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2607 adapter->phy.interface_type = 2608 le16_to_cpu(resp_phy_info->interface_type); 2609 adapter->phy.auto_speeds_supported = 2610 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2611 adapter->phy.fixed_speeds_supported = 2612 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2613 adapter->phy.misc_params = 2614 
le32_to_cpu(resp_phy_info->misc_params); 2615 2616 if (BE2_chip(adapter)) { 2617 adapter->phy.fixed_speeds_supported = 2618 BE_SUPPORTED_SPEED_10GBPS | 2619 BE_SUPPORTED_SPEED_1GBPS; 2620 } 2621 } 2622 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2623 err: 2624 spin_unlock_bh(&adapter->mcc_lock); 2625 return status; 2626 } 2627 2628 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2629 { 2630 struct be_mcc_wrb *wrb; 2631 struct be_cmd_req_set_qos *req; 2632 int status; 2633 2634 spin_lock_bh(&adapter->mcc_lock); 2635 2636 wrb = wrb_from_mccq(adapter); 2637 if (!wrb) { 2638 status = -EBUSY; 2639 goto err; 2640 } 2641 2642 req = embedded_payload(wrb); 2643 2644 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2645 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2646 2647 req->hdr.domain = domain; 2648 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2649 req->max_bps_nic = cpu_to_le32(bps); 2650 2651 status = be_mcc_notify_wait(adapter); 2652 2653 err: 2654 spin_unlock_bh(&adapter->mcc_lock); 2655 return status; 2656 } 2657 2658 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2659 { 2660 struct be_mcc_wrb *wrb; 2661 struct be_cmd_req_cntl_attribs *req; 2662 struct be_cmd_resp_cntl_attribs *resp; 2663 int status; 2664 int payload_len = max(sizeof(*req), sizeof(*resp)); 2665 struct mgmt_controller_attrib *attribs; 2666 struct be_dma_mem attribs_cmd; 2667 2668 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2669 return -1; 2670 2671 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2672 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2673 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2674 &attribs_cmd.dma); 2675 if (!attribs_cmd.va) { 2676 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2677 status = -ENOMEM; 2678 goto err; 2679 } 2680 2681 wrb = wrb_from_mbox(adapter); 2682 if (!wrb) { 2683 status = -EBUSY; 2684 goto err; 2685 } 2686 req = attribs_cmd.va; 2687 2688 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2689 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2690 wrb, &attribs_cmd); 2691 2692 status = be_mbox_notify_wait(adapter); 2693 if (!status) { 2694 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2695 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2696 } 2697 2698 err: 2699 mutex_unlock(&adapter->mbox_lock); 2700 if (attribs_cmd.va) 2701 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2702 attribs_cmd.va, attribs_cmd.dma); 2703 return status; 2704 } 2705 2706 /* Uses mbox */ 2707 int be_cmd_req_native_mode(struct be_adapter *adapter) 2708 { 2709 struct be_mcc_wrb *wrb; 2710 struct be_cmd_req_set_func_cap *req; 2711 int status; 2712 2713 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2714 return -1; 2715 2716 wrb = wrb_from_mbox(adapter); 2717 if (!wrb) { 2718 status = -EBUSY; 2719 goto err; 2720 } 2721 2722 req = embedded_payload(wrb); 2723 2724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2725 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2726 sizeof(*req), wrb, NULL); 2727 2728 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2729 CAPABILITY_BE3_NATIVE_ERX_API); 2730 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2731 2732 status = be_mbox_notify_wait(adapter); 2733 if (!status) { 2734 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2735 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2736 CAPABILITY_BE3_NATIVE_ERX_API; 2737 if (!adapter->be3_native) 2738 dev_warn(&adapter->pdev->dev, 
2739 "adapter not in advanced mode\n"); 2740 } 2741 err: 2742 mutex_unlock(&adapter->mbox_lock); 2743 return status; 2744 } 2745 2746 /* Get privilege(s) for a function */ 2747 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2748 u32 domain) 2749 { 2750 struct be_mcc_wrb *wrb; 2751 struct be_cmd_req_get_fn_privileges *req; 2752 int status; 2753 2754 spin_lock_bh(&adapter->mcc_lock); 2755 2756 wrb = wrb_from_mccq(adapter); 2757 if (!wrb) { 2758 status = -EBUSY; 2759 goto err; 2760 } 2761 2762 req = embedded_payload(wrb); 2763 2764 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2765 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 2766 wrb, NULL); 2767 2768 req->hdr.domain = domain; 2769 2770 status = be_mcc_notify_wait(adapter); 2771 if (!status) { 2772 struct be_cmd_resp_get_fn_privileges *resp = 2773 embedded_payload(wrb); 2774 *privilege = le32_to_cpu(resp->privilege_mask); 2775 2776 /* In UMC mode the FW does not return the right privileges. 2777 * Override them with the correct privileges, equivalent to a PF's. 2778 */ 2779 if (BEx_chip(adapter) && be_is_mc(adapter) && 2780 be_physfn(adapter)) 2781 *privilege = MAX_PRIVILEGES; 2782 } 2783 2784 err: 2785 spin_unlock_bh(&adapter->mcc_lock); 2786 return status; 2787 } 2788 2789 /* Set privilege(s) for a function */ 2790 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2791 u32 domain) 2792 { 2793 struct be_mcc_wrb *wrb; 2794 struct be_cmd_req_set_fn_privileges *req; 2795 int status; 2796 2797 spin_lock_bh(&adapter->mcc_lock); 2798 2799 wrb = wrb_from_mccq(adapter); 2800 if (!wrb) { 2801 status = -EBUSY; 2802 goto err; 2803 } 2804 2805 req = embedded_payload(wrb); 2806 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2807 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 2808 wrb, NULL); 2809 req->hdr.domain = domain; 2810 if (lancer_chip(adapter)) 2811 req->privileges_lancer = cpu_to_le32(privileges); 2812 else 2813 req->privileges = cpu_to_le32(privileges); 2814 2815 status = be_mcc_notify_wait(adapter); 2816 err: 2817 spin_unlock_bh(&adapter->mcc_lock); 2818 return status; 2819 } 2820 2821 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 2822 * pmac_id_valid: false => an active pmac_id or, failing that, a MAC address is requested.
2823 * If a pmac_id is returned, pmac_id_valid is returned as true. 2824 */ 2825 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2826 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 2827 u8 domain) 2828 { 2829 struct be_mcc_wrb *wrb; 2830 struct be_cmd_req_get_mac_list *req; 2831 int status; 2832 int mac_count; 2833 struct be_dma_mem get_mac_list_cmd; 2834 int i; 2835 2836 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2837 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2838 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2839 get_mac_list_cmd.size, 2840 &get_mac_list_cmd.dma); 2841 2842 if (!get_mac_list_cmd.va) { 2843 dev_err(&adapter->pdev->dev, 2844 "Memory allocation failure during GET_MAC_LIST\n"); 2845 return -ENOMEM; 2846 } 2847 2848 spin_lock_bh(&adapter->mcc_lock); 2849 2850 wrb = wrb_from_mccq(adapter); 2851 if (!wrb) { 2852 status = -EBUSY; 2853 goto out; 2854 } 2855 2856 req = get_mac_list_cmd.va; 2857 2858 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2859 OPCODE_COMMON_GET_MAC_LIST, 2860 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 2861 req->hdr.domain = domain; 2862 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 2863 if (*pmac_id_valid) { 2864 req->mac_id = cpu_to_le32(*pmac_id); 2865 req->iface_id = cpu_to_le16(if_handle); 2866 req->perm_override = 0; 2867 } else { 2868 req->perm_override = 1; 2869 } 2870 2871 status = be_mcc_notify_wait(adapter); 2872 if (!status) { 2873 struct be_cmd_resp_get_mac_list *resp = 2874 get_mac_list_cmd.va; 2875 2876 if (*pmac_id_valid) { 2877 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 2878 ETH_ALEN); 2879 goto out; 2880 } 2881 2882 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2883 /* MAC list returned could contain one or more active mac_ids 2884 * or one or more true or pseudo permanent mac addresses. 2885 * If an active mac_id is present, return the first active mac_id 2886 * found.
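 * Otherwise the first (true or pseudo) permanent MAC address in the list is copied to @mac and *pmac_id_valid is set to false.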
2887 */ 2888 for (i = 0; i < mac_count; i++) { 2889 struct get_list_macaddr *mac_entry; 2890 u16 mac_addr_size; 2891 u32 mac_id; 2892 2893 mac_entry = &resp->macaddr_list[i]; 2894 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 2895 /* mac_id is a 32 bit value and mac_addr size 2896 * is 6 bytes 2897 */ 2898 if (mac_addr_size == sizeof(u32)) { 2899 *pmac_id_valid = true; 2900 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 2901 *pmac_id = le32_to_cpu(mac_id); 2902 goto out; 2903 } 2904 } 2905 /* If no active mac_id found, return first mac addr */ 2906 *pmac_id_valid = false; 2907 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2908 ETH_ALEN); 2909 } 2910 2911 out: 2912 spin_unlock_bh(&adapter->mcc_lock); 2913 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 2914 get_mac_list_cmd.va, get_mac_list_cmd.dma); 2915 return status; 2916 } 2917 2918 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 2919 u8 *mac, u32 if_handle, bool active, u32 domain) 2920 { 2921 2922 if (!active) 2923 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 2924 if_handle, domain); 2925 if (BEx_chip(adapter)) 2926 return be_cmd_mac_addr_query(adapter, mac, false, 2927 if_handle, curr_pmac_id); 2928 else 2929 /* Fetch the MAC address using pmac_id */ 2930 return be_cmd_get_mac_from_list(adapter, mac, &active, 2931 &curr_pmac_id, 2932 if_handle, domain); 2933 } 2934 2935 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 2936 { 2937 int status; 2938 bool pmac_valid = false; 2939 2940 memset(mac, 0, ETH_ALEN); 2941 2942 if (BEx_chip(adapter)) { 2943 if (be_physfn(adapter)) 2944 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 2945 0); 2946 else 2947 status = be_cmd_mac_addr_query(adapter, mac, false, 2948 adapter->if_handle, 0); 2949 } else { 2950 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 2951 NULL, adapter->if_handle, 0); 2952 } 2953 2954 return status; 2955 } 2956 2957 /* Uses synchronous MCCQ */ 2958 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 2959 u8 mac_count, u32 domain) 2960 { 2961 struct be_mcc_wrb *wrb; 2962 struct be_cmd_req_set_mac_list *req; 2963 int status; 2964 struct be_dma_mem cmd; 2965 2966 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2967 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2968 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2969 &cmd.dma, GFP_KERNEL); 2970 if (!cmd.va) 2971 return -ENOMEM; 2972 2973 spin_lock_bh(&adapter->mcc_lock); 2974 2975 wrb = wrb_from_mccq(adapter); 2976 if (!wrb) { 2977 status = -EBUSY; 2978 goto err; 2979 } 2980 2981 req = cmd.va; 2982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2983 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2984 wrb, &cmd); 2985 2986 req->hdr.domain = domain; 2987 req->mac_count = mac_count; 2988 if (mac_count) 2989 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 2990 2991 status = be_mcc_notify_wait(adapter); 2992 2993 err: 2994 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2995 spin_unlock_bh(&adapter->mcc_lock); 2996 return status; 2997 } 2998 2999 /* Wrapper to delete any active MACs and provision the new mac. 3000 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 3001 * current list are active. 
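 * Passing a NULL @mac simply clears the list (mac_count is then 0).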
3002 */ 3003 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 3004 { 3005 bool active_mac = false; 3006 u8 old_mac[ETH_ALEN]; 3007 u32 pmac_id; 3008 int status; 3009 3010 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 3011 &pmac_id, if_id, dom); 3012 3013 if (!status && active_mac) 3014 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 3015 3016 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 3017 } 3018 3019 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 3020 u32 domain, u16 intf_id, u16 hsw_mode) 3021 { 3022 struct be_mcc_wrb *wrb; 3023 struct be_cmd_req_set_hsw_config *req; 3024 void *ctxt; 3025 int status; 3026 3027 spin_lock_bh(&adapter->mcc_lock); 3028 3029 wrb = wrb_from_mccq(adapter); 3030 if (!wrb) { 3031 status = -EBUSY; 3032 goto err; 3033 } 3034 3035 req = embedded_payload(wrb); 3036 ctxt = &req->context; 3037 3038 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3039 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3040 NULL); 3041 3042 req->hdr.domain = domain; 3043 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3044 if (pvid) { 3045 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3046 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3047 } 3048 if (!BEx_chip(adapter) && hsw_mode) { 3049 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3050 ctxt, adapter->hba_port_num); 3051 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3052 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3053 ctxt, hsw_mode); 3054 } 3055 3056 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3057 status = be_mcc_notify_wait(adapter); 3058 3059 err: 3060 spin_unlock_bh(&adapter->mcc_lock); 3061 return status; 3062 } 3063 3064 /* Get Hyper switch config */ 3065 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3066 u32 domain, u16 intf_id, u8 *mode) 3067 { 3068 struct be_mcc_wrb *wrb; 3069 struct be_cmd_req_get_hsw_config *req; 3070 void *ctxt; 3071 int status; 3072 u16 vid; 3073 3074 spin_lock_bh(&adapter->mcc_lock); 3075 3076 wrb = wrb_from_mccq(adapter); 3077 if (!wrb) { 3078 status = -EBUSY; 3079 goto err; 3080 } 3081 3082 req = embedded_payload(wrb); 3083 ctxt = &req->context; 3084 3085 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3086 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3087 NULL); 3088 3089 req->hdr.domain = domain; 3090 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3091 ctxt, intf_id); 3092 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3093 3094 if (!BEx_chip(adapter) && mode) { 3095 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3096 ctxt, adapter->hba_port_num); 3097 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3098 } 3099 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3100 3101 status = be_mcc_notify_wait(adapter); 3102 if (!status) { 3103 struct be_cmd_resp_get_hsw_config *resp = 3104 embedded_payload(wrb); 3105 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3106 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3107 pvid, &resp->context); 3108 if (pvid) 3109 *pvid = le16_to_cpu(vid); 3110 if (mode) 3111 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3112 port_fwd_type, &resp->context); 3113 } 3114 3115 err: 3116 spin_unlock_bh(&adapter->mcc_lock); 3117 return status; 3118 } 3119 3120 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3121 { 3122 struct be_mcc_wrb *wrb; 3123 struct 
be_cmd_req_acpi_wol_magic_config_v1 *req; 3124 int status = 0; 3125 struct be_dma_mem cmd; 3126 3127 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3128 CMD_SUBSYSTEM_ETH)) 3129 return -EPERM; 3130 3131 if (be_is_wol_excluded(adapter)) 3132 return status; 3133 3134 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3135 return -1; 3136 3137 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3138 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3139 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3140 if (!cmd.va) { 3141 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3142 status = -ENOMEM; 3143 goto err; 3144 } 3145 3146 wrb = wrb_from_mbox(adapter); 3147 if (!wrb) { 3148 status = -EBUSY; 3149 goto err; 3150 } 3151 3152 req = cmd.va; 3153 3154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3155 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3156 sizeof(*req), wrb, &cmd); 3157 3158 req->hdr.version = 1; 3159 req->query_options = BE_GET_WOL_CAP; 3160 3161 status = be_mbox_notify_wait(adapter); 3162 if (!status) { 3163 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 3164 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; 3165 3166 adapter->wol_cap = resp->wol_settings; 3167 if (adapter->wol_cap & BE_WOL_CAP) 3168 adapter->wol_en = true; 3169 } 3170 err: 3171 mutex_unlock(&adapter->mbox_lock); 3172 if (cmd.va) 3173 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3174 return status; 3175 3176 } 3177 3178 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 3179 { 3180 struct be_dma_mem extfat_cmd; 3181 struct be_fat_conf_params *cfgs; 3182 int status; 3183 int i, j; 3184 3185 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3186 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3187 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3188 &extfat_cmd.dma); 3189 if (!extfat_cmd.va) 3190 return -ENOMEM; 3191 3192 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3193 if (status) 3194 goto err; 3195 3196 cfgs = (struct be_fat_conf_params *) 3197 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 3198 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 3199 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 3200 for (j = 0; j < num_modes; j++) { 3201 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 3202 cfgs->module[i].trace_lvl[j].dbg_lvl = 3203 cpu_to_le32(level); 3204 } 3205 } 3206 3207 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3208 err: 3209 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3210 extfat_cmd.dma); 3211 return status; 3212 } 3213 3214 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 3215 { 3216 struct be_dma_mem extfat_cmd; 3217 struct be_fat_conf_params *cfgs; 3218 int status, j; 3219 int level = 0; 3220 3221 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3222 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3223 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3224 &extfat_cmd.dma); 3225 3226 if (!extfat_cmd.va) { 3227 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3228 __func__); 3229 goto err; 3230 } 3231 3232 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3233 if (!status) { 3234 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 3235 sizeof(struct be_cmd_resp_hdr)); 3236 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 3237 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 3238 level = 
cfgs->module[0].trace_lvl[j].dbg_lvl; 3239 } 3240 } 3241 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3242 extfat_cmd.dma); 3243 err: 3244 return level; 3245 } 3246 3247 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 3248 struct be_dma_mem *cmd) 3249 { 3250 struct be_mcc_wrb *wrb; 3251 struct be_cmd_req_get_ext_fat_caps *req; 3252 int status; 3253 3254 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3255 return -1; 3256 3257 wrb = wrb_from_mbox(adapter); 3258 if (!wrb) { 3259 status = -EBUSY; 3260 goto err; 3261 } 3262 3263 req = cmd->va; 3264 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3265 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 3266 cmd->size, wrb, cmd); 3267 req->parameter_type = cpu_to_le32(1); 3268 3269 status = be_mbox_notify_wait(adapter); 3270 err: 3271 mutex_unlock(&adapter->mbox_lock); 3272 return status; 3273 } 3274 3275 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 3276 struct be_dma_mem *cmd, 3277 struct be_fat_conf_params *configs) 3278 { 3279 struct be_mcc_wrb *wrb; 3280 struct be_cmd_req_set_ext_fat_caps *req; 3281 int status; 3282 3283 spin_lock_bh(&adapter->mcc_lock); 3284 3285 wrb = wrb_from_mccq(adapter); 3286 if (!wrb) { 3287 status = -EBUSY; 3288 goto err; 3289 } 3290 3291 req = cmd->va; 3292 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 3293 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3294 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 3295 cmd->size, wrb, cmd); 3296 3297 status = be_mcc_notify_wait(adapter); 3298 err: 3299 spin_unlock_bh(&adapter->mcc_lock); 3300 return status; 3301 } 3302 3303 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name) 3304 { 3305 struct be_mcc_wrb *wrb; 3306 struct be_cmd_req_get_port_name *req; 3307 int status; 3308 3309 if (!lancer_chip(adapter)) { 3310 *port_name = adapter->hba_port_num + '0'; 3311 return 0; 3312 } 3313 3314 spin_lock_bh(&adapter->mcc_lock); 3315 3316 wrb = wrb_from_mccq(adapter); 3317 if (!wrb) { 3318 status = -EBUSY; 3319 goto err; 3320 } 3321 3322 req = embedded_payload(wrb); 3323 3324 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3325 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3326 NULL); 3327 req->hdr.version = 1; 3328 3329 status = be_mcc_notify_wait(adapter); 3330 if (!status) { 3331 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3332 *port_name = resp->port_name[adapter->hba_port_num]; 3333 } else { 3334 *port_name = adapter->hba_port_num + '0'; 3335 } 3336 err: 3337 spin_unlock_bh(&adapter->mcc_lock); 3338 return status; 3339 } 3340 3341 /* Descriptor type */ 3342 enum { 3343 FUNC_DESC = 1, 3344 VFT_DESC = 2 3345 }; 3346 3347 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 3348 int desc_type) 3349 { 3350 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3351 struct be_nic_res_desc *nic; 3352 int i; 3353 3354 for (i = 0; i < desc_count; i++) { 3355 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3356 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { 3357 nic = (struct be_nic_res_desc *)hdr; 3358 if (desc_type == FUNC_DESC || 3359 (desc_type == VFT_DESC && 3360 nic->flags & (1 << VFT_SHIFT))) 3361 return nic; 3362 } 3363 3364 hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; 3365 hdr = (void *)hdr + hdr->desc_len; 3366 } 3367 return NULL; 3368 } 3369 3370 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) 3371 { 3372 return be_get_nic_desc(buf, desc_count, VFT_DESC); 3373 } 3374 3375 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) 3376 { 3377 return be_get_nic_desc(buf, desc_count, FUNC_DESC); 3378 } 3379 3380 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, 3381 u32 desc_count) 3382 { 3383 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3384 struct be_pcie_res_desc *pcie; 3385 int i; 3386 3387 for (i = 0; i < desc_count; i++) { 3388 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 3389 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { 3390 pcie = (struct be_pcie_res_desc *)hdr; 3391 if (pcie->pf_num == devfn) 3392 return pcie; 3393 } 3394 3395 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3396 hdr = (void *)hdr + hdr->desc_len; 3397 } 3398 return NULL; 3399 } 3400 3401 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 3402 { 3403 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3404 int i; 3405 3406 for (i = 0; i < desc_count; i++) { 3407 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 3408 return (struct be_port_res_desc *)hdr; 3409 3410 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3411 hdr = (void *)hdr + hdr->desc_len; 3412 } 3413 return NULL; 3414 } 3415 3416 static void be_copy_nic_desc(struct be_resources *res, 3417 struct be_nic_res_desc *desc) 3418 { 3419 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 3420 res->max_vlans = le16_to_cpu(desc->vlan_count); 3421 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 3422 res->max_tx_qs = le16_to_cpu(desc->txq_count); 3423 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 3424 res->max_rx_qs = le16_to_cpu(desc->rq_count); 3425 res->max_evt_qs = le16_to_cpu(desc->eq_count); 3426 /* Clear flags that driver is not interested in */ 3427 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 3428 BE_IF_CAP_FLAGS_WANT; 3429 /* Need 1 RXQ as the default RXQ */ 3430 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs) 3431 res->max_rss_qs -= 1; 3432 } 3433 3434 /* Uses Mbox */ 3435 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 3436 { 3437 struct be_mcc_wrb *wrb; 3438 struct be_cmd_req_get_func_config *req; 3439 int status; 3440 struct be_dma_mem cmd; 3441 3442 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3443 return -1; 3444 3445 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3446 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3447 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3448 if (!cmd.va) { 3449 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3450 status = -ENOMEM; 3451 goto err; 3452 } 3453 3454 wrb = wrb_from_mbox(adapter); 3455 if (!wrb) { 3456 status = -EBUSY; 3457 goto err; 3458 } 3459 3460 req = cmd.va; 3461 3462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3463 OPCODE_COMMON_GET_FUNC_CONFIG, 3464 cmd.size, wrb, &cmd); 3465 3466 if (skyhawk_chip(adapter)) 3467 req->hdr.version = 1; 3468 3469 status = be_mbox_notify_wait(adapter); 3470 if (!status) { 3471 struct be_cmd_resp_get_func_config *resp = cmd.va; 3472 u32 desc_count = le32_to_cpu(resp->desc_count); 3473 struct be_nic_res_desc *desc; 3474 3475 desc = be_get_func_nic_desc(resp->func_param, desc_count); 3476 if (!desc) { 3477 status = -EINVAL; 3478 goto err; 3479 } 3480 3481 
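/* Cache the PF number from the NIC descriptor; it is needed later when building descriptors for profile-config commands, e.g. be_cmd_config_qos(). */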
adapter->pf_number = desc->pf_num; 3482 be_copy_nic_desc(res, desc); 3483 } 3484 err: 3485 mutex_unlock(&adapter->mbox_lock); 3486 if (cmd.va) 3487 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3488 return status; 3489 } 3490 3491 /* Will use MBOX only if MCCQ has not been created */ 3492 int be_cmd_get_profile_config(struct be_adapter *adapter, 3493 struct be_resources *res, u8 domain) 3494 { 3495 struct be_cmd_resp_get_profile_config *resp; 3496 struct be_cmd_req_get_profile_config *req; 3497 struct be_nic_res_desc *vf_res; 3498 struct be_pcie_res_desc *pcie; 3499 struct be_port_res_desc *port; 3500 struct be_nic_res_desc *nic; 3501 struct be_mcc_wrb wrb = {0}; 3502 struct be_dma_mem cmd; 3503 u32 desc_count; 3504 int status; 3505 3506 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3507 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3508 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3509 if (!cmd.va) 3510 return -ENOMEM; 3511 3512 req = cmd.va; 3513 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3514 OPCODE_COMMON_GET_PROFILE_CONFIG, 3515 cmd.size, &wrb, &cmd); 3516 3517 req->hdr.domain = domain; 3518 if (!lancer_chip(adapter)) 3519 req->hdr.version = 1; 3520 req->type = ACTIVE_PROFILE_TYPE; 3521 3522 status = be_cmd_notify_wait(adapter, &wrb); 3523 if (status) 3524 goto err; 3525 3526 resp = cmd.va; 3527 desc_count = le32_to_cpu(resp->desc_count); 3528 3529 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3530 desc_count); 3531 if (pcie) 3532 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3533 3534 port = be_get_port_desc(resp->func_param, desc_count); 3535 if (port) 3536 adapter->mc_type = port->mc_type; 3537 3538 nic = be_get_func_nic_desc(resp->func_param, desc_count); 3539 if (nic) 3540 be_copy_nic_desc(res, nic); 3541 3542 vf_res = be_get_vft_desc(resp->func_param, desc_count); 3543 if (vf_res) 3544 res->vf_if_cap_flags = vf_res->cap_flags; 3545 err: 3546 if (cmd.va) 3547 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3548 return status; 3549 } 3550 3551 /* Will use MBOX only if MCCQ has not been created */ 3552 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 3553 int size, int count, u8 version, u8 domain) 3554 { 3555 struct be_cmd_req_set_profile_config *req; 3556 struct be_mcc_wrb wrb = {0}; 3557 struct be_dma_mem cmd; 3558 int status; 3559 3560 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3561 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3562 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3563 if (!cmd.va) 3564 return -ENOMEM; 3565 3566 req = cmd.va; 3567 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3568 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, 3569 &wrb, &cmd); 3570 req->hdr.version = version; 3571 req->hdr.domain = domain; 3572 req->desc_count = cpu_to_le32(count); 3573 memcpy(req->desc, desc, size); 3574 3575 status = be_cmd_notify_wait(adapter, &wrb); 3576 3577 if (cmd.va) 3578 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3579 return status; 3580 } 3581 3582 /* Mark all fields invalid */ 3583 static void be_reset_nic_desc(struct be_nic_res_desc *nic) 3584 { 3585 memset(nic, 0, sizeof(*nic)); 3586 nic->unicast_mac_count = 0xFFFF; 3587 nic->mcc_count = 0xFFFF; 3588 nic->vlan_count = 0xFFFF; 3589 nic->mcast_mac_count = 0xFFFF; 3590 nic->txq_count = 0xFFFF; 3591 nic->rq_count = 0xFFFF; 3592 nic->rssq_count = 0xFFFF; 3593 nic->lro_count = 0xFFFF; 3594 nic->cq_count = 0xFFFF; 3595 nic->toe_conn_count = 0xFFFF; 
3596 nic->eq_count = 0xFFFF; 3597 nic->iface_count = 0xFFFF; 3598 nic->link_param = 0xFF; 3599 nic->channel_id_param = cpu_to_le16(0xF000); 3600 nic->acpi_params = 0xFF; 3601 nic->wol_param = 0x0F; 3602 nic->tunnel_iface_count = 0xFFFF; 3603 nic->direct_tenant_iface_count = 0xFFFF; 3604 nic->bw_min = 0xFFFFFFFF; 3605 nic->bw_max = 0xFFFFFFFF; 3606 } 3607 3608 /* Mark all fields invalid */ 3609 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) 3610 { 3611 memset(pcie, 0, sizeof(*pcie)); 3612 pcie->sriov_state = 0xFF; 3613 pcie->pf_state = 0xFF; 3614 pcie->pf_type = 0xFF; 3615 pcie->num_vfs = 0xFFFF; 3616 } 3617 3618 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 3619 u8 domain) 3620 { 3621 struct be_nic_res_desc nic_desc; 3622 u32 bw_percent; 3623 u16 version = 0; 3624 3625 if (BE3_chip(adapter)) 3626 return be_cmd_set_qos(adapter, max_rate / 10, domain); 3627 3628 be_reset_nic_desc(&nic_desc); 3629 nic_desc.pf_num = adapter->pf_number; 3630 nic_desc.vf_num = domain; 3631 if (lancer_chip(adapter)) { 3632 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3633 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3634 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3635 (1 << NOSV_SHIFT); 3636 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 3637 } else { 3638 version = 1; 3639 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3640 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3641 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3642 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 3643 nic_desc.bw_max = cpu_to_le32(bw_percent); 3644 } 3645 3646 return be_cmd_set_profile_config(adapter, &nic_desc, 3647 nic_desc.hdr.desc_len, 3648 1, version, domain); 3649 } 3650 3651 int be_cmd_set_sriov_config(struct be_adapter *adapter, 3652 struct be_resources res, u16 num_vfs) 3653 { 3654 struct { 3655 struct be_pcie_res_desc pcie; 3656 struct be_nic_res_desc nic_vft; 3657 } __packed desc; 3658 u16 vf_q_count; 3659 3660 if (BEx_chip(adapter) || lancer_chip(adapter)) 3661 return 0; 3662 3663 /* PF PCIE descriptor */ 3664 be_reset_pcie_desc(&desc.pcie); 3665 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; 3666 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3667 desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3668 desc.pcie.pf_num = adapter->pdev->devfn; 3669 desc.pcie.sriov_state = num_vfs ? 
1 : 0; 3670 desc.pcie.num_vfs = cpu_to_le16(num_vfs); 3671 3672 /* VF NIC Template descriptor */ 3673 be_reset_nic_desc(&desc.nic_vft); 3674 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3675 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3676 desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) | 3677 (1 << NOSV_SHIFT); 3678 desc.nic_vft.pf_num = adapter->pdev->devfn; 3679 desc.nic_vft.vf_num = 0; 3680 3681 if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) { 3682 /* If the number of VFs requested is at least 8 fewer than the 3683 * max supported, assign 8 queue pairs to the PF and divide the 3684 * remaining resources evenly among the VFs 3685 */ 3686 if (num_vfs < (be_max_vfs(adapter) - 8)) 3687 vf_q_count = (res.max_rss_qs - 8) / num_vfs; 3688 else 3689 vf_q_count = res.max_rss_qs / num_vfs; 3690 3691 desc.nic_vft.rq_count = cpu_to_le16(vf_q_count); 3692 desc.nic_vft.txq_count = cpu_to_le16(vf_q_count); 3693 desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1); 3694 desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count); 3695 } else { 3696 desc.nic_vft.txq_count = cpu_to_le16(1); 3697 desc.nic_vft.rq_count = cpu_to_le16(1); 3698 desc.nic_vft.rssq_count = cpu_to_le16(0); 3699 /* One CQ for each TX, RX and MCCQ */ 3700 desc.nic_vft.cq_count = cpu_to_le16(3); 3701 } 3702 3703 return be_cmd_set_profile_config(adapter, &desc, 3704 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 3705 } 3706 3707 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3708 { 3709 struct be_mcc_wrb *wrb; 3710 struct be_cmd_req_manage_iface_filters *req; 3711 int status; 3712 3713 if (iface == 0xFFFFFFFF) 3714 return -1; 3715 3716 spin_lock_bh(&adapter->mcc_lock); 3717 3718 wrb = wrb_from_mccq(adapter); 3719 if (!wrb) { 3720 status = -EBUSY; 3721 goto err; 3722 } 3723 req = embedded_payload(wrb); 3724 3725 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3726 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 3727 wrb, NULL); 3728 req->op = op; 3729 req->target_iface_id = cpu_to_le32(iface); 3730 3731 status = be_mcc_notify_wait(adapter); 3732 err: 3733 spin_unlock_bh(&adapter->mcc_lock); 3734 return status; 3735 } 3736 3737 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 3738 { 3739 struct be_port_res_desc port_desc; 3740 3741 memset(&port_desc, 0, sizeof(port_desc)); 3742 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 3743 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3744 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3745 port_desc.link_num = adapter->hba_port_num; 3746 if (port) { 3747 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 3748 (1 << RCVID_SHIFT); 3749 port_desc.nv_port = swab16(port); 3750 } else { 3751 port_desc.nv_flags = NV_TYPE_DISABLED; 3752 port_desc.nv_port = 0; 3753 } 3754 3755 return be_cmd_set_profile_config(adapter, &port_desc, 3756 RESOURCE_DESC_SIZE_V1, 1, 1, 0); 3757 } 3758 3759 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 3760 int vf_num) 3761 { 3762 struct be_mcc_wrb *wrb; 3763 struct be_cmd_req_get_iface_list *req; 3764 struct be_cmd_resp_get_iface_list *resp; 3765 int status; 3766 3767 spin_lock_bh(&adapter->mcc_lock); 3768 3769 wrb = wrb_from_mccq(adapter); 3770 if (!wrb) { 3771 status = -EBUSY; 3772 goto err; 3773 } 3774 req = embedded_payload(wrb); 3775 3776 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3777 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 3778 wrb, NULL); 3779 req->hdr.domain = vf_num + 1; 3780 3781 status = be_mcc_notify_wait(adapter); 3782 if (!status) { 3783
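/* For embedded commands the response overlays the request in the same WRB payload, so the request pointer is simply re-cast. */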
resp = (struct be_cmd_resp_get_iface_list *)req; 3784 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 3785 } 3786 3787 err: 3788 spin_unlock_bh(&adapter->mcc_lock); 3789 return status; 3790 } 3791 3792 static int lancer_wait_idle(struct be_adapter *adapter) 3793 { 3794 #define SLIPORT_IDLE_TIMEOUT 30 3795 u32 reg_val; 3796 int status = 0, i; 3797 3798 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 3799 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 3800 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 3801 break; 3802 3803 ssleep(1); 3804 } 3805 3806 if (i == SLIPORT_IDLE_TIMEOUT) 3807 status = -1; 3808 3809 return status; 3810 } 3811 3812 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 3813 { 3814 int status = 0; 3815 3816 status = lancer_wait_idle(adapter); 3817 if (status) 3818 return status; 3819 3820 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 3821 3822 return status; 3823 } 3824 3825 /* Check whether a dump image is present */ 3826 bool dump_present(struct be_adapter *adapter) 3827 { 3828 u32 sliport_status = 0; 3829 3830 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 3831 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 3832 } 3833 3834 int lancer_initiate_dump(struct be_adapter *adapter) 3835 { 3836 struct device *dev = &adapter->pdev->dev; 3837 int status; 3838 3839 if (dump_present(adapter)) { 3840 dev_info(dev, "Previous dump not cleared, not forcing dump\n"); 3841 return -EEXIST; 3842 } 3843 3844 /* trigger a firmware reset and diagnostic dump */ 3845 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 3846 PHYSDEV_CONTROL_DD_MASK); 3847 if (status < 0) { 3848 dev_err(dev, "FW reset failed\n"); 3849 return status; 3850 } 3851 3852 status = lancer_wait_idle(adapter); 3853 if (status) 3854 return status; 3855 3856 if (!dump_present(adapter)) { 3857 dev_err(dev, "FW dump not generated\n"); 3858 return -EIO; 3859 } 3860 3861 return 0; 3862 } 3863 3864 int lancer_delete_dump(struct be_adapter *adapter) 3865 { 3866 int status; 3867 3868 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); 3869 return be_cmd_status(status); 3870 } 3871 3872 /* Uses sync mcc */ 3873 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 3874 { 3875 struct be_mcc_wrb *wrb; 3876 struct be_cmd_enable_disable_vf *req; 3877 int status; 3878 3879 if (BEx_chip(adapter)) 3880 return 0; 3881 3882 spin_lock_bh(&adapter->mcc_lock); 3883 3884 wrb = wrb_from_mccq(adapter); 3885 if (!wrb) { 3886 status = -EBUSY; 3887 goto err; 3888 } 3889 3890 req = embedded_payload(wrb); 3891 3892 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3893 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 3894 wrb, NULL); 3895 3896 req->hdr.domain = domain; 3897 req->enable = 1; 3898 status = be_mcc_notify_wait(adapter); 3899 err: 3900 spin_unlock_bh(&adapter->mcc_lock); 3901 return status; 3902 } 3903 3904 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 3905 { 3906 struct be_mcc_wrb *wrb; 3907 struct be_cmd_req_intr_set *req; 3908 int status; 3909 3910 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3911 return -1; 3912 3913 wrb = wrb_from_mbox(adapter); 3914 3915 req = embedded_payload(wrb); 3916 3917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3918 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 3919 wrb, NULL); 3920 3921 req->intr_enabled = intr_enable; 3922 3923 status = be_mbox_notify_wait(adapter); 3924 3925 mutex_unlock(&adapter->mbox_lock); 3926 return status; 3927 } 3928 3929 /* Uses
MBOX */ 3930 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 3931 { 3932 struct be_cmd_req_get_active_profile *req; 3933 struct be_mcc_wrb *wrb; 3934 int status; 3935 3936 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3937 return -1; 3938 3939 wrb = wrb_from_mbox(adapter); 3940 if (!wrb) { 3941 status = -EBUSY; 3942 goto err; 3943 } 3944 3945 req = embedded_payload(wrb); 3946 3947 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3948 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 3949 wrb, NULL); 3950 3951 status = be_mbox_notify_wait(adapter); 3952 if (!status) { 3953 struct be_cmd_resp_get_active_profile *resp = 3954 embedded_payload(wrb); 3955 *profile_id = le16_to_cpu(resp->active_profile_id); 3956 } 3957 3958 err: 3959 mutex_unlock(&adapter->mbox_lock); 3960 return status; 3961 } 3962 3963 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 3964 int link_state, u8 domain) 3965 { 3966 struct be_mcc_wrb *wrb; 3967 struct be_cmd_req_set_ll_link *req; 3968 int status; 3969 3970 if (BEx_chip(adapter) || lancer_chip(adapter)) 3971 return 0; 3972 3973 spin_lock_bh(&adapter->mcc_lock); 3974 3975 wrb = wrb_from_mccq(adapter); 3976 if (!wrb) { 3977 status = -EBUSY; 3978 goto err; 3979 } 3980 3981 req = embedded_payload(wrb); 3982 3983 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3984 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 3985 sizeof(*req), wrb, NULL); 3986 3987 req->hdr.version = 1; 3988 req->hdr.domain = domain; 3989 3990 if (link_state == IFLA_VF_LINK_STATE_ENABLE) 3991 req->link_config |= 1; 3992 3993 if (link_state == IFLA_VF_LINK_STATE_AUTO) 3994 req->link_config |= 1 << PLINK_TRACK_SHIFT; 3995 3996 status = be_mcc_notify_wait(adapter); 3997 err: 3998 spin_unlock_bh(&adapter->mcc_lock); 3999 return status; 4000 } 4001 4002 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 4003 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 4004 { 4005 struct be_adapter *adapter = netdev_priv(netdev_handle); 4006 struct be_mcc_wrb *wrb; 4007 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; 4008 struct be_cmd_req_hdr *req; 4009 struct be_cmd_resp_hdr *resp; 4010 int status; 4011 4012 spin_lock_bh(&adapter->mcc_lock); 4013 4014 wrb = wrb_from_mccq(adapter); 4015 if (!wrb) { 4016 status = -EBUSY; 4017 goto err; 4018 } 4019 req = embedded_payload(wrb); 4020 resp = embedded_payload(wrb); 4021 4022 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 4023 hdr->opcode, wrb_payload_size, wrb, NULL); 4024 memcpy(req, wrb_payload, wrb_payload_size); 4025 be_dws_cpu_to_le(req, wrb_payload_size); 4026 4027 status = be_mcc_notify_wait(adapter); 4028 if (cmd_status) 4029 *cmd_status = (status & 0xffff); 4030 if (ext_status) 4031 *ext_status = 0; 4032 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 4033 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 4034 err: 4035 spin_unlock_bh(&adapter->mcc_lock); 4036 return status; 4037 } 4038 EXPORT_SYMBOL(be_roce_mcc_cmd); 4039
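/*
 * Illustrative usage sketch (not part of the driver): how a caller, e.g.
 * the ethtool pause-parameter hooks, might pair the flow-control commands
 * above. The helper name is hypothetical and error handling is trimmed.
 *
 *	static int example_toggle_rx_pause(struct be_adapter *adapter)
 *	{
 *		u32 tx_fc, rx_fc;
 *		int status;
 *
 *		status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
 *		if (status)
 *			return status;
 *
 *		return be_cmd_set_flow_control(adapter, tx_fc, !rx_fc);
 *	}
 */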