/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
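/* The WRB has no dedicated field for the driver's completion context, so
 * the virtual address of the response header is carried in the two 32-bit
 * tag words (tag0 = low half, tag1 = high half; see fill_wrb_tags()).
 * The helper below reassembles that pointer when a completion arrives.
 */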
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
		}
		return;
	}
}
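/* Runs from MCC CQ processing; returns the completion's combined
 * base/additional status word after swapping it to host endianness.
 */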
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log a message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s\n", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}
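/* All GRP5 events share one async event code; the specific event is
 * identified by the type field in the completion's flags word.
 */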
static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
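/* Polls the MCC CQ with bottom-halves disabled until all outstanding WRBs
 * complete; 120000 iterations of udelay(100) bound the wait at ~12s.
 */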
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
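/* The POST stage is reported via the SLIPORT semaphore register: memory
 * mapped in the CSR BAR on BE2/BE3, but only reachable through PCI config
 * space on later (SH-R) chips.
 */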
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
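/* Split a physically contiguous DMA region into the list of 4K page
 * addresses that the firmware expects, capped at max_pages entries.
 */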
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		/* Fix: the original returned -EBUSY here without releasing
		 * the lock taken by be_cmd_lock() above.
		 */
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
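/* EQ ring sizes are encoded as log2(len/256) in the context's count
 * field; e.g. a 1024-entry EQ encodes as 2.
 */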
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
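/* Most commands from here on run on the MCC queue under mcc_lock and
 * return -EBUSY when no WRB is free; commands that must work before the
 * MCCQ itself exists (queue creation, FW config) keep using the mailbox.
 */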
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
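/* MCC_CREATE_EXT lets the driver pick the async event codes it wants to
 * receive via async_event_bitmap; the plain MCC_CREATE fallback below
 * carries no such filter.
 */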
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Group 5, QnQ and Sliport events */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}
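/* The TX-queue WRB is built on the stack and handed to
 * be_cmd_notify_wait(), which routes it to the MCCQ or the mailbox,
 * whichever is usable at this point in initialization.
 */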
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}
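/* The RX fragment size is programmed as a log2 value (fls(frag_size) - 1)
 * and must therefore be a power of two; e.g. 2048-byte frags encode as 11.
 */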
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && !be_physfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
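/* The completion is consumed later by be_async_cmd_process(), which
 * parses the stats block and clears stats_cmd_sent.
 */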
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1;
	 * the rest (Skyhawk) use v2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
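/* When the FW does not fill in the link_speed field, the speed must be
 * derived from the mac_speed enum; this maps it to Mbps.
 */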
static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses async mcc; the completion is handled in be_async_cmd_process() */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
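/* The FAT log is retrieved in chunks of up to 60KB through a single DMA
 * buffer and copied out to the caller's buffer chunk by chunk.
 */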
int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
	    log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return -EIO;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
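/* EQ-delay updates are issued in batches of at most 8 EQs per WRB. */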
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	while (num) {
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
		i += num_eqs;
		num -= num_eqs;
	}

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;
		int i = 0;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
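/* Public wrapper: clamps the requested flags to the capabilities the
 * interface was created with before programming the RX filter.
 */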
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);

	return __be_cmd_rx_filter(adapter, flags, value);
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
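/* The RSS indirection table size is programmed as a log2 value
 * (fls(table_size) - 1) and must be a power of two; e.g. a 128-entry
 * table encodes as 7.
 */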
return status; 2135 } 2136 2137 /* Uses sync mcc */ 2138 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2139 u8 bcn, u8 sts, u8 state) 2140 { 2141 struct be_mcc_wrb *wrb; 2142 struct be_cmd_req_enable_disable_beacon *req; 2143 int status; 2144 2145 spin_lock_bh(&adapter->mcc_lock); 2146 2147 wrb = wrb_from_mccq(adapter); 2148 if (!wrb) { 2149 status = -EBUSY; 2150 goto err; 2151 } 2152 req = embedded_payload(wrb); 2153 2154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2155 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2156 sizeof(*req), wrb, NULL); 2157 2158 req->port_num = port_num; 2159 req->beacon_state = state; 2160 req->beacon_duration = bcn; 2161 req->status_duration = sts; 2162 2163 status = be_mcc_notify_wait(adapter); 2164 2165 err: 2166 spin_unlock_bh(&adapter->mcc_lock); 2167 return status; 2168 } 2169 2170 /* Uses sync mcc */ 2171 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2172 { 2173 struct be_mcc_wrb *wrb; 2174 struct be_cmd_req_get_beacon_state *req; 2175 int status; 2176 2177 spin_lock_bh(&adapter->mcc_lock); 2178 2179 wrb = wrb_from_mccq(adapter); 2180 if (!wrb) { 2181 status = -EBUSY; 2182 goto err; 2183 } 2184 req = embedded_payload(wrb); 2185 2186 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2187 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2188 wrb, NULL); 2189 2190 req->port_num = port_num; 2191 2192 status = be_mcc_notify_wait(adapter); 2193 if (!status) { 2194 struct be_cmd_resp_get_beacon_state *resp = 2195 embedded_payload(wrb); 2196 2197 *state = resp->beacon_state; 2198 } 2199 2200 err: 2201 spin_unlock_bh(&adapter->mcc_lock); 2202 return status; 2203 } 2204 2205 /* Uses sync mcc */ 2206 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, 2207 u8 page_num, u8 *data) 2208 { 2209 struct be_dma_mem cmd; 2210 struct be_mcc_wrb *wrb; 2211 struct be_cmd_req_port_type *req; 2212 int status; 2213 2214 if (page_num > TR_PAGE_A2) 2215 return -EINVAL; 2216 2217 cmd.size = sizeof(struct be_cmd_resp_port_type); 2218 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2219 if (!cmd.va) { 2220 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2221 return -ENOMEM; 2222 } 2223 memset(cmd.va, 0, cmd.size); 2224 2225 spin_lock_bh(&adapter->mcc_lock); 2226 2227 wrb = wrb_from_mccq(adapter); 2228 if (!wrb) { 2229 status = -EBUSY; 2230 goto err; 2231 } 2232 req = cmd.va; 2233 2234 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2235 OPCODE_COMMON_READ_TRANSRECV_DATA, 2236 cmd.size, wrb, &cmd); 2237 2238 req->port = cpu_to_le32(adapter->hba_port_num); 2239 req->page_num = cpu_to_le32(page_num); 2240 status = be_mcc_notify_wait(adapter); 2241 if (!status) { 2242 struct be_cmd_resp_port_type *resp = cmd.va; 2243 2244 memcpy(data, resp->page_data, PAGE_DATA_LEN); 2245 } 2246 err: 2247 spin_unlock_bh(&adapter->mcc_lock); 2248 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2249 return status; 2250 } 2251 2252 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2253 u32 data_size, u32 data_offset, 2254 const char *obj_name, u32 *data_written, 2255 u8 *change_status, u8 *addn_status) 2256 { 2257 struct be_mcc_wrb *wrb; 2258 struct lancer_cmd_req_write_object *req; 2259 struct lancer_cmd_resp_write_object *resp; 2260 void *ctxt = NULL; 2261 int status; 2262 2263 spin_lock_bh(&adapter->mcc_lock); 2264 adapter->flash_status = 0; 2265 2266 wrb = wrb_from_mccq(adapter); 2267 if (!wrb) { 2268 status = -EBUSY; 2269 goto err_unlock; 2270 
} 2271 2272 req = embedded_payload(wrb); 2273 2274 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2275 OPCODE_COMMON_WRITE_OBJECT, 2276 sizeof(struct lancer_cmd_req_write_object), wrb, 2277 NULL); 2278 2279 ctxt = &req->context; 2280 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2281 write_length, ctxt, data_size); 2282 2283 if (data_size == 0) 2284 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2285 eof, ctxt, 1); 2286 else 2287 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2288 eof, ctxt, 0); 2289 2290 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2291 req->write_offset = cpu_to_le32(data_offset); 2292 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2293 req->descriptor_count = cpu_to_le32(1); 2294 req->buf_len = cpu_to_le32(data_size); 2295 req->addr_low = cpu_to_le32((cmd->dma + 2296 sizeof(struct lancer_cmd_req_write_object)) 2297 & 0xFFFFFFFF); 2298 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2299 sizeof(struct lancer_cmd_req_write_object))); 2300 2301 be_mcc_notify(adapter); 2302 spin_unlock_bh(&adapter->mcc_lock); 2303 2304 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2305 msecs_to_jiffies(60000))) 2306 status = -ETIMEDOUT; 2307 else 2308 status = adapter->flash_status; 2309 2310 resp = embedded_payload(wrb); 2311 if (!status) { 2312 *data_written = le32_to_cpu(resp->actual_write_len); 2313 *change_status = resp->change_status; 2314 } else { 2315 *addn_status = resp->additional_status; 2316 } 2317 2318 return status; 2319 2320 err_unlock: 2321 spin_unlock_bh(&adapter->mcc_lock); 2322 return status; 2323 } 2324 2325 int be_cmd_query_cable_type(struct be_adapter *adapter) 2326 { 2327 u8 page_data[PAGE_DATA_LEN]; 2328 int status; 2329 2330 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2331 page_data); 2332 if (!status) { 2333 switch (adapter->phy.interface_type) { 2334 case PHY_TYPE_QSFP: 2335 adapter->phy.cable_type = 2336 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; 2337 break; 2338 case PHY_TYPE_SFP_PLUS_10GB: 2339 adapter->phy.cable_type = 2340 page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; 2341 break; 2342 default: 2343 adapter->phy.cable_type = 0; 2344 break; 2345 } 2346 } 2347 return status; 2348 } 2349 2350 int be_cmd_query_sfp_info(struct be_adapter *adapter) 2351 { 2352 u8 page_data[PAGE_DATA_LEN]; 2353 int status; 2354 2355 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2356 page_data); 2357 if (!status) { 2358 strlcpy(adapter->phy.vendor_name, page_data + 2359 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); 2360 strlcpy(adapter->phy.vendor_pn, 2361 page_data + SFP_VENDOR_PN_OFFSET, 2362 SFP_VENDOR_NAME_LEN - 1); 2363 } 2364 2365 return status; 2366 } 2367 2368 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) 2369 { 2370 struct lancer_cmd_req_delete_object *req; 2371 struct be_mcc_wrb *wrb; 2372 int status; 2373 2374 spin_lock_bh(&adapter->mcc_lock); 2375 2376 wrb = wrb_from_mccq(adapter); 2377 if (!wrb) { 2378 status = -EBUSY; 2379 goto err; 2380 } 2381 2382 req = embedded_payload(wrb); 2383 2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2385 OPCODE_COMMON_DELETE_OBJECT, 2386 sizeof(*req), wrb, NULL); 2387 2388 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2389 2390 status = be_mcc_notify_wait(adapter); 2391 err: 2392 spin_unlock_bh(&adapter->mcc_lock); 2393 return status; 2394 } 2395 2396 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2397 u32 data_size, u32 data_offset, const char *obj_name, 
2398 u32 *data_read, u32 *eof, u8 *addn_status) 2399 { 2400 struct be_mcc_wrb *wrb; 2401 struct lancer_cmd_req_read_object *req; 2402 struct lancer_cmd_resp_read_object *resp; 2403 int status; 2404 2405 spin_lock_bh(&adapter->mcc_lock); 2406 2407 wrb = wrb_from_mccq(adapter); 2408 if (!wrb) { 2409 status = -EBUSY; 2410 goto err_unlock; 2411 } 2412 2413 req = embedded_payload(wrb); 2414 2415 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2416 OPCODE_COMMON_READ_OBJECT, 2417 sizeof(struct lancer_cmd_req_read_object), wrb, 2418 NULL); 2419 2420 req->desired_read_len = cpu_to_le32(data_size); 2421 req->read_offset = cpu_to_le32(data_offset); 2422 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2423 req->descriptor_count = cpu_to_le32(1); 2424 req->buf_len = cpu_to_le32(data_size); 2425 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2426 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2427 2428 status = be_mcc_notify_wait(adapter); 2429 2430 resp = embedded_payload(wrb); 2431 if (!status) { 2432 *data_read = le32_to_cpu(resp->actual_read_len); 2433 *eof = le32_to_cpu(resp->eof); 2434 } else { 2435 *addn_status = resp->additional_status; 2436 } 2437 2438 err_unlock: 2439 spin_unlock_bh(&adapter->mcc_lock); 2440 return status; 2441 } 2442 2443 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2444 u32 flash_type, u32 flash_opcode, u32 img_offset, 2445 u32 buf_size) 2446 { 2447 struct be_mcc_wrb *wrb; 2448 struct be_cmd_write_flashrom *req; 2449 int status; 2450 2451 spin_lock_bh(&adapter->mcc_lock); 2452 adapter->flash_status = 0; 2453 2454 wrb = wrb_from_mccq(adapter); 2455 if (!wrb) { 2456 status = -EBUSY; 2457 goto err_unlock; 2458 } 2459 req = cmd->va; 2460 2461 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2462 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2463 cmd); 2464 2465 req->params.op_type = cpu_to_le32(flash_type); 2466 if (flash_type == OPTYPE_OFFSET_SPECIFIED) 2467 req->params.offset = cpu_to_le32(img_offset); 2468 2469 req->params.op_code = cpu_to_le32(flash_opcode); 2470 req->params.data_buf_size = cpu_to_le32(buf_size); 2471 2472 be_mcc_notify(adapter); 2473 spin_unlock_bh(&adapter->mcc_lock); 2474 2475 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2476 msecs_to_jiffies(40000))) 2477 status = -ETIMEDOUT; 2478 else 2479 status = adapter->flash_status; 2480 2481 return status; 2482 2483 err_unlock: 2484 spin_unlock_bh(&adapter->mcc_lock); 2485 return status; 2486 } 2487 2488 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2489 u16 img_optype, u32 img_offset, u32 crc_offset) 2490 { 2491 struct be_cmd_read_flash_crc *req; 2492 struct be_mcc_wrb *wrb; 2493 int status; 2494 2495 spin_lock_bh(&adapter->mcc_lock); 2496 2497 wrb = wrb_from_mccq(adapter); 2498 if (!wrb) { 2499 status = -EBUSY; 2500 goto err; 2501 } 2502 req = embedded_payload(wrb); 2503 2504 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2505 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2506 wrb, NULL); 2507 2508 req->params.op_type = cpu_to_le32(img_optype); 2509 if (img_optype == OPTYPE_OFFSET_SPECIFIED) 2510 req->params.offset = cpu_to_le32(img_offset + crc_offset); 2511 else 2512 req->params.offset = cpu_to_le32(crc_offset); 2513 2514 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2515 req->params.data_buf_size = cpu_to_le32(0x4); 2516 2517 status = be_mcc_notify_wait(adapter); 2518 if (!status) 2519 memcpy(flashed_crc, req->crc, 4); 2520 2521 err: 2522 spin_unlock_bh(&adapter->mcc_lock); 2523 return status; 2524 } 2525
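/* Illustrative sketch (not part of the driver): a firmware-update path could
 * use be_cmd_get_flash_crc() to skip re-flashing a section whose on-flash CRC
 * already matches the new image. The helper below is hypothetical, and it
 * assumes the section's CRC occupies the last 4 bytes of the image buffer.
 */
static inline bool be_flash_section_unchanged(struct be_adapter *adapter,
					      const u8 *img, u32 img_size,
					      u16 img_optype, u32 img_offset)
{
	u8 flashed_crc[4];

	/* Read the 4-byte CRC the adapter currently has on flash */
	if (be_cmd_get_flash_crc(adapter, flashed_crc, img_optype,
				 img_offset, img_size - 4))
		return false;

	/* Equal CRCs => the section does not need to be rewritten */
	return !memcmp(flashed_crc, img + img_size - 4, 4);
}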
2526 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2527 struct be_dma_mem *nonemb_cmd) 2528 { 2529 struct be_mcc_wrb *wrb; 2530 struct be_cmd_req_acpi_wol_magic_config *req; 2531 int status; 2532 2533 spin_lock_bh(&adapter->mcc_lock); 2534 2535 wrb = wrb_from_mccq(adapter); 2536 if (!wrb) { 2537 status = -EBUSY; 2538 goto err; 2539 } 2540 req = nonemb_cmd->va; 2541 2542 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2543 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2544 wrb, nonemb_cmd); 2545 memcpy(req->magic_mac, mac, ETH_ALEN); 2546 2547 status = be_mcc_notify_wait(adapter); 2548 2549 err: 2550 spin_unlock_bh(&adapter->mcc_lock); 2551 return status; 2552 } 2553 2554 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2555 u8 loopback_type, u8 enable) 2556 { 2557 struct be_mcc_wrb *wrb; 2558 struct be_cmd_req_set_lmode *req; 2559 int status; 2560 2561 spin_lock_bh(&adapter->mcc_lock); 2562 2563 wrb = wrb_from_mccq(adapter); 2564 if (!wrb) { 2565 status = -EBUSY; 2566 goto err; 2567 } 2568 2569 req = embedded_payload(wrb); 2570 2571 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2572 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2573 wrb, NULL); 2574 2575 req->src_port = port_num; 2576 req->dest_port = port_num; 2577 req->loopback_type = loopback_type; 2578 req->loopback_state = enable; 2579 2580 status = be_mcc_notify_wait(adapter); 2581 err: 2582 spin_unlock_bh(&adapter->mcc_lock); 2583 return status; 2584 } 2585 2586 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2587 u32 loopback_type, u32 pkt_size, u32 num_pkts, 2588 u64 pattern) 2589 { 2590 struct be_mcc_wrb *wrb; 2591 struct be_cmd_req_loopback_test *req; 2592 struct be_cmd_resp_loopback_test *resp; 2593 int status; 2594 2595 spin_lock_bh(&adapter->mcc_lock); 2596 2597 wrb = wrb_from_mccq(adapter); 2598 if (!wrb) { 2599 status = -EBUSY; 2600 goto err; 2601 } 2602 2603 req = embedded_payload(wrb); 2604 2605 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2606 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2607 NULL); 2608 2609 req->hdr.timeout = cpu_to_le32(15); 2610 req->pattern = cpu_to_le64(pattern); 2611 req->src_port = cpu_to_le32(port_num); 2612 req->dest_port = cpu_to_le32(port_num); 2613 req->pkt_size = cpu_to_le32(pkt_size); 2614 req->num_pkts = cpu_to_le32(num_pkts); 2615 req->loopback_type = cpu_to_le32(loopback_type); 2616 2617 be_mcc_notify(adapter); 2618 2619 spin_unlock_bh(&adapter->mcc_lock); 2620 2621 wait_for_completion(&adapter->et_cmd_compl); 2622 resp = embedded_payload(wrb); 2623 status = le32_to_cpu(resp->status); 2624 2625 return status; 2626 err: 2627 spin_unlock_bh(&adapter->mcc_lock); 2628 return status; 2629 } 2630 2631 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2632 u32 byte_cnt, struct be_dma_mem *cmd) 2633 { 2634 struct be_mcc_wrb *wrb; 2635 struct be_cmd_req_ddrdma_test *req; 2636 int status; 2637 int i, j = 0; 2638 2639 spin_lock_bh(&adapter->mcc_lock); 2640 2641 wrb = wrb_from_mccq(adapter); 2642 if (!wrb) { 2643 status = -EBUSY; 2644 goto err; 2645 } 2646 req = cmd->va; 2647 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2648 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2649 cmd); 2650 2651 req->pattern = cpu_to_le64(pattern); 2652 req->byte_count = cpu_to_le32(byte_cnt); 2653 for (i = 0; i < byte_cnt; i++) { 2654 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2655 j++; 2656 if (j > 7) 2657 j = 0; 2658 } 2659 2660 status = be_mcc_notify_wait(adapter); 2661 2662 if (!status) { 2663 
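		/* Verify the DMA loopback: the buffer echoed back by the
		 * adapter must match the transmitted pattern, and the send
		 * side must report no error.
		 */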
struct be_cmd_resp_ddrdma_test *resp; 2664 2665 resp = cmd->va; 2666 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2667 resp->snd_err) { 2668 status = -1; 2669 } 2670 } 2671 2672 err: 2673 spin_unlock_bh(&adapter->mcc_lock); 2674 return status; 2675 } 2676 2677 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2678 struct be_dma_mem *nonemb_cmd) 2679 { 2680 struct be_mcc_wrb *wrb; 2681 struct be_cmd_req_seeprom_read *req; 2682 int status; 2683 2684 spin_lock_bh(&adapter->mcc_lock); 2685 2686 wrb = wrb_from_mccq(adapter); 2687 if (!wrb) { 2688 status = -EBUSY; 2689 goto err; 2690 } 2691 req = nonemb_cmd->va; 2692 2693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2694 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2695 nonemb_cmd); 2696 2697 status = be_mcc_notify_wait(adapter); 2698 2699 err: 2700 spin_unlock_bh(&adapter->mcc_lock); 2701 return status; 2702 } 2703 2704 int be_cmd_get_phy_info(struct be_adapter *adapter) 2705 { 2706 struct be_mcc_wrb *wrb; 2707 struct be_cmd_req_get_phy_info *req; 2708 struct be_dma_mem cmd; 2709 int status; 2710 2711 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 2712 CMD_SUBSYSTEM_COMMON)) 2713 return -EPERM; 2714 2715 spin_lock_bh(&adapter->mcc_lock); 2716 2717 wrb = wrb_from_mccq(adapter); 2718 if (!wrb) { 2719 status = -EBUSY; 2720 goto err; 2721 } 2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2723 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2724 if (!cmd.va) { 2725 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2726 status = -ENOMEM; 2727 goto err; 2728 } 2729 2730 req = cmd.va; 2731 2732 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2733 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2734 wrb, &cmd); 2735 2736 status = be_mcc_notify_wait(adapter); 2737 if (!status) { 2738 struct be_phy_info *resp_phy_info = 2739 cmd.va + sizeof(struct be_cmd_req_hdr); 2740 2741 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2742 adapter->phy.interface_type = 2743 le16_to_cpu(resp_phy_info->interface_type); 2744 adapter->phy.auto_speeds_supported = 2745 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2746 adapter->phy.fixed_speeds_supported = 2747 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2748 adapter->phy.misc_params = 2749 le32_to_cpu(resp_phy_info->misc_params); 2750 2751 if (BE2_chip(adapter)) { 2752 adapter->phy.fixed_speeds_supported = 2753 BE_SUPPORTED_SPEED_10GBPS | 2754 BE_SUPPORTED_SPEED_1GBPS; 2755 } 2756 } 2757 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2758 err: 2759 spin_unlock_bh(&adapter->mcc_lock); 2760 return status; 2761 } 2762 2763 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2764 { 2765 struct be_mcc_wrb *wrb; 2766 struct be_cmd_req_set_qos *req; 2767 int status; 2768 2769 spin_lock_bh(&adapter->mcc_lock); 2770 2771 wrb = wrb_from_mccq(adapter); 2772 if (!wrb) { 2773 status = -EBUSY; 2774 goto err; 2775 } 2776 2777 req = embedded_payload(wrb); 2778 2779 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2780 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2781 2782 req->hdr.domain = domain; 2783 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2784 req->max_bps_nic = cpu_to_le32(bps); 2785 2786 status = be_mcc_notify_wait(adapter); 2787 2788 err: 2789 spin_unlock_bh(&adapter->mcc_lock); 2790 return status; 2791 } 2792 2793 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2794 { 2795 struct be_mcc_wrb *wrb; 2796 struct be_cmd_req_cntl_attribs *req; 2797 struct 
be_cmd_resp_cntl_attribs *resp; 2798 int status; 2799 int payload_len = max(sizeof(*req), sizeof(*resp)); 2800 struct mgmt_controller_attrib *attribs; 2801 struct be_dma_mem attribs_cmd; 2802 2803 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2804 return -1; 2805 2806 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2807 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2808 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2809 &attribs_cmd.dma); 2810 if (!attribs_cmd.va) { 2811 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2812 status = -ENOMEM; 2813 goto err; 2814 } 2815 2816 wrb = wrb_from_mbox(adapter); 2817 if (!wrb) { 2818 status = -EBUSY; 2819 goto err; 2820 } 2821 req = attribs_cmd.va; 2822 2823 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2824 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2825 wrb, &attribs_cmd); 2826 2827 status = be_mbox_notify_wait(adapter); 2828 if (!status) { 2829 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2830 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2831 } 2832 2833 err: 2834 mutex_unlock(&adapter->mbox_lock); 2835 if (attribs_cmd.va) 2836 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2837 attribs_cmd.va, attribs_cmd.dma); 2838 return status; 2839 } 2840 2841 /* Uses mbox */ 2842 int be_cmd_req_native_mode(struct be_adapter *adapter) 2843 { 2844 struct be_mcc_wrb *wrb; 2845 struct be_cmd_req_set_func_cap *req; 2846 int status; 2847 2848 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2849 return -1; 2850 2851 wrb = wrb_from_mbox(adapter); 2852 if (!wrb) { 2853 status = -EBUSY; 2854 goto err; 2855 } 2856 2857 req = embedded_payload(wrb); 2858 2859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2860 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2861 sizeof(*req), wrb, NULL); 2862 2863 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2864 CAPABILITY_BE3_NATIVE_ERX_API); 2865 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2866 2867 status = be_mbox_notify_wait(adapter); 2868 if (!status) { 2869 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2870 2871 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2872 CAPABILITY_BE3_NATIVE_ERX_API; 2873 if (!adapter->be3_native) 2874 dev_warn(&adapter->pdev->dev, 2875 "adapter not in advanced mode\n"); 2876 } 2877 err: 2878 mutex_unlock(&adapter->mbox_lock); 2879 return status; 2880 } 2881 2882 /* Get privilege(s) for a function */ 2883 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2884 u32 domain) 2885 { 2886 struct be_mcc_wrb *wrb; 2887 struct be_cmd_req_get_fn_privileges *req; 2888 int status; 2889 2890 spin_lock_bh(&adapter->mcc_lock); 2891 2892 wrb = wrb_from_mccq(adapter); 2893 if (!wrb) { 2894 status = -EBUSY; 2895 goto err; 2896 } 2897 2898 req = embedded_payload(wrb); 2899 2900 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2901 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 2902 wrb, NULL); 2903 2904 req->hdr.domain = domain; 2905 2906 status = be_mcc_notify_wait(adapter); 2907 if (!status) { 2908 struct be_cmd_resp_get_fn_privileges *resp = 2909 embedded_payload(wrb); 2910 2911 *privilege = le32_to_cpu(resp->privilege_mask); 2912 2913 /* In UMC mode FW does not return right privileges. 2914 * Override with correct privilege equivalent to PF. 
2915 */ 2916 if (BEx_chip(adapter) && be_is_mc(adapter) && 2917 be_physfn(adapter)) 2918 *privilege = MAX_PRIVILEGES; 2919 } 2920 2921 err: 2922 spin_unlock_bh(&adapter->mcc_lock); 2923 return status; 2924 } 2925 2926 /* Set privilege(s) for a function */ 2927 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2928 u32 domain) 2929 { 2930 struct be_mcc_wrb *wrb; 2931 struct be_cmd_req_set_fn_privileges *req; 2932 int status; 2933 2934 spin_lock_bh(&adapter->mcc_lock); 2935 2936 wrb = wrb_from_mccq(adapter); 2937 if (!wrb) { 2938 status = -EBUSY; 2939 goto err; 2940 } 2941 2942 req = embedded_payload(wrb); 2943 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2944 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 2945 wrb, NULL); 2946 req->hdr.domain = domain; 2947 if (lancer_chip(adapter)) 2948 req->privileges_lancer = cpu_to_le32(privileges); 2949 else 2950 req->privileges = cpu_to_le32(privileges); 2951 2952 status = be_mcc_notify_wait(adapter); 2953 err: 2954 spin_unlock_bh(&adapter->mcc_lock); 2955 return status; 2956 } 2957 2958 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 2959 * pmac_id_valid: false => pmac_id or MAC address is requested. 2960 * If pmac_id is returned, pmac_id_valid is returned as true 2961 */ 2962 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2963 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 2964 u8 domain) 2965 { 2966 struct be_mcc_wrb *wrb; 2967 struct be_cmd_req_get_mac_list *req; 2968 int status; 2969 int mac_count; 2970 struct be_dma_mem get_mac_list_cmd; 2971 int i; 2972 2973 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2974 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2975 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2976 get_mac_list_cmd.size, 2977 &get_mac_list_cmd.dma); 2978 2979 if (!get_mac_list_cmd.va) { 2980 dev_err(&adapter->pdev->dev, 2981 "Memory allocation failure during GET_MAC_LIST\n"); 2982 return -ENOMEM; 2983 } 2984 2985 spin_lock_bh(&adapter->mcc_lock); 2986 2987 wrb = wrb_from_mccq(adapter); 2988 if (!wrb) { 2989 status = -EBUSY; 2990 goto out; 2991 } 2992 2993 req = get_mac_list_cmd.va; 2994 2995 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2996 OPCODE_COMMON_GET_MAC_LIST, 2997 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 2998 req->hdr.domain = domain; 2999 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 3000 if (*pmac_id_valid) { 3001 req->mac_id = cpu_to_le32(*pmac_id); 3002 req->iface_id = cpu_to_le16(if_handle); 3003 req->perm_override = 0; 3004 } else { 3005 req->perm_override = 1; 3006 } 3007 3008 status = be_mcc_notify_wait(adapter); 3009 if (!status) { 3010 struct be_cmd_resp_get_mac_list *resp = 3011 get_mac_list_cmd.va; 3012 3013 if (*pmac_id_valid) { 3014 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 3015 ETH_ALEN); 3016 goto out; 3017 } 3018 3019 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 3020 /* Mac list returned could contain one or more active mac_ids 3021 * or one or more true or pseudo permanent mac addresses. 3022 * If an active mac_id is present, return first active mac_id 3023 * found. 
3024 */ 3025 for (i = 0; i < mac_count; i++) { 3026 struct get_list_macaddr *mac_entry; 3027 u16 mac_addr_size; 3028 u32 mac_id; 3029 3030 mac_entry = &resp->macaddr_list[i]; 3031 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 3032 /* mac_id is a 32 bit value and mac_addr size 3033 * is 6 bytes 3034 */ 3035 if (mac_addr_size == sizeof(u32)) { 3036 *pmac_id_valid = true; 3037 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 3038 *pmac_id = le32_to_cpu(mac_id); 3039 goto out; 3040 } 3041 } 3042 /* If no active mac_id found, return first mac addr */ 3043 *pmac_id_valid = false; 3044 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 3045 ETH_ALEN); 3046 } 3047 3048 out: 3049 spin_unlock_bh(&adapter->mcc_lock); 3050 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 3051 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3052 return status; 3053 } 3054 3055 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 3056 u8 *mac, u32 if_handle, bool active, u32 domain) 3057 { 3058 if (!active) 3059 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 3060 if_handle, domain); 3061 if (BEx_chip(adapter)) 3062 return be_cmd_mac_addr_query(adapter, mac, false, 3063 if_handle, curr_pmac_id); 3064 else 3065 /* Fetch the MAC address using pmac_id */ 3066 return be_cmd_get_mac_from_list(adapter, mac, &active, 3067 &curr_pmac_id, 3068 if_handle, domain); 3069 } 3070 3071 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 3072 { 3073 int status; 3074 bool pmac_valid = false; 3075 3076 eth_zero_addr(mac); 3077 3078 if (BEx_chip(adapter)) { 3079 if (be_physfn(adapter)) 3080 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 3081 0); 3082 else 3083 status = be_cmd_mac_addr_query(adapter, mac, false, 3084 adapter->if_handle, 0); 3085 } else { 3086 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 3087 NULL, adapter->if_handle, 0); 3088 } 3089 3090 return status; 3091 } 3092 3093 /* Uses synchronous MCCQ */ 3094 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 3095 u8 mac_count, u32 domain) 3096 { 3097 struct be_mcc_wrb *wrb; 3098 struct be_cmd_req_set_mac_list *req; 3099 int status; 3100 struct be_dma_mem cmd; 3101 3102 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3103 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3104 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 3105 &cmd.dma, GFP_KERNEL); 3106 if (!cmd.va) 3107 return -ENOMEM; 3108 3109 spin_lock_bh(&adapter->mcc_lock); 3110 3111 wrb = wrb_from_mccq(adapter); 3112 if (!wrb) { 3113 status = -EBUSY; 3114 goto err; 3115 } 3116 3117 req = cmd.va; 3118 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3119 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 3120 wrb, &cmd); 3121 3122 req->hdr.domain = domain; 3123 req->mac_count = mac_count; 3124 if (mac_count) 3125 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 3126 3127 status = be_mcc_notify_wait(adapter); 3128 3129 err: 3130 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3131 spin_unlock_bh(&adapter->mcc_lock); 3132 return status; 3133 } 3134 3135 /* Wrapper to delete any active MACs and provision the new mac. 3136 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 3137 * current list are active. 
3138 */ 3139 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 3140 { 3141 bool active_mac = false; 3142 u8 old_mac[ETH_ALEN]; 3143 u32 pmac_id; 3144 int status; 3145 3146 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 3147 &pmac_id, if_id, dom); 3148 3149 if (!status && active_mac) 3150 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 3151 3152 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 3153 } 3154 3155 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 3156 u32 domain, u16 intf_id, u16 hsw_mode) 3157 { 3158 struct be_mcc_wrb *wrb; 3159 struct be_cmd_req_set_hsw_config *req; 3160 void *ctxt; 3161 int status; 3162 3163 spin_lock_bh(&adapter->mcc_lock); 3164 3165 wrb = wrb_from_mccq(adapter); 3166 if (!wrb) { 3167 status = -EBUSY; 3168 goto err; 3169 } 3170 3171 req = embedded_payload(wrb); 3172 ctxt = &req->context; 3173 3174 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3175 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3176 NULL); 3177 3178 req->hdr.domain = domain; 3179 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3180 if (pvid) { 3181 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3182 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3183 } 3184 if (!BEx_chip(adapter) && hsw_mode) { 3185 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3186 ctxt, adapter->hba_port_num); 3187 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3188 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3189 ctxt, hsw_mode); 3190 } 3191 3192 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3193 status = be_mcc_notify_wait(adapter); 3194 3195 err: 3196 spin_unlock_bh(&adapter->mcc_lock); 3197 return status; 3198 } 3199 3200 /* Get Hyper switch config */ 3201 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3202 u32 domain, u16 intf_id, u8 *mode) 3203 { 3204 struct be_mcc_wrb *wrb; 3205 struct be_cmd_req_get_hsw_config *req; 3206 void *ctxt; 3207 int status; 3208 u16 vid; 3209 3210 spin_lock_bh(&adapter->mcc_lock); 3211 3212 wrb = wrb_from_mccq(adapter); 3213 if (!wrb) { 3214 status = -EBUSY; 3215 goto err; 3216 } 3217 3218 req = embedded_payload(wrb); 3219 ctxt = &req->context; 3220 3221 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3222 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3223 NULL); 3224 3225 req->hdr.domain = domain; 3226 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3227 ctxt, intf_id); 3228 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3229 3230 if (!BEx_chip(adapter) && mode) { 3231 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3232 ctxt, adapter->hba_port_num); 3233 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3234 } 3235 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3236 3237 status = be_mcc_notify_wait(adapter); 3238 if (!status) { 3239 struct be_cmd_resp_get_hsw_config *resp = 3240 embedded_payload(wrb); 3241 3242 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3243 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3244 pvid, &resp->context); 3245 if (pvid) 3246 *pvid = le16_to_cpu(vid); 3247 if (mode) 3248 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3249 port_fwd_type, &resp->context); 3250 } 3251 3252 err: 3253 spin_unlock_bh(&adapter->mcc_lock); 3254 return status; 3255 } 3256 3257 static bool be_is_wol_excluded(struct be_adapter *adapter) 3258 { 3259 struct pci_dev *pdev = 
adapter->pdev; 3260 3261 if (!be_physfn(adapter)) 3262 return true; 3263 3264 switch (pdev->subsystem_device) { 3265 case OC_SUBSYS_DEVICE_ID1: 3266 case OC_SUBSYS_DEVICE_ID2: 3267 case OC_SUBSYS_DEVICE_ID3: 3268 case OC_SUBSYS_DEVICE_ID4: 3269 return true; 3270 default: 3271 return false; 3272 } 3273 } 3274 3275 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3276 { 3277 struct be_mcc_wrb *wrb; 3278 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 3279 int status = 0; 3280 struct be_dma_mem cmd; 3281 3282 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3283 CMD_SUBSYSTEM_ETH)) 3284 return -EPERM; 3285 3286 if (be_is_wol_excluded(adapter)) 3287 return status; 3288 3289 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3290 return -1; 3291 3292 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3293 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3294 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3295 if (!cmd.va) { 3296 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3297 status = -ENOMEM; 3298 goto err; 3299 } 3300 3301 wrb = wrb_from_mbox(adapter); 3302 if (!wrb) { 3303 status = -EBUSY; 3304 goto err; 3305 } 3306 3307 req = cmd.va; 3308 3309 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3310 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3311 sizeof(*req), wrb, &cmd); 3312 3313 req->hdr.version = 1; 3314 req->query_options = BE_GET_WOL_CAP; 3315 3316 status = be_mbox_notify_wait(adapter); 3317 if (!status) { 3318 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 3319 3320 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; 3321 3322 adapter->wol_cap = resp->wol_settings; 3323 if (adapter->wol_cap & BE_WOL_CAP) 3324 adapter->wol_en = true; 3325 } 3326 err: 3327 mutex_unlock(&adapter->mbox_lock); 3328 if (cmd.va) 3329 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3330 return status; 3331 3332 } 3333 3334 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 3335 { 3336 struct be_dma_mem extfat_cmd; 3337 struct be_fat_conf_params *cfgs; 3338 int status; 3339 int i, j; 3340 3341 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3342 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3343 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3344 &extfat_cmd.dma); 3345 if (!extfat_cmd.va) 3346 return -ENOMEM; 3347 3348 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3349 if (status) 3350 goto err; 3351 3352 cfgs = (struct be_fat_conf_params *) 3353 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 3354 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 3355 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 3356 3357 for (j = 0; j < num_modes; j++) { 3358 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 3359 cfgs->module[i].trace_lvl[j].dbg_lvl = 3360 cpu_to_le32(level); 3361 } 3362 } 3363 3364 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3365 err: 3366 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3367 extfat_cmd.dma); 3368 return status; 3369 } 3370 3371 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 3372 { 3373 struct be_dma_mem extfat_cmd; 3374 struct be_fat_conf_params *cfgs; 3375 int status, j; 3376 int level = 0; 3377 3378 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3379 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3380 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3381 &extfat_cmd.dma); 3382 3383 if (!extfat_cmd.va) { 3384 
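		/* No DMA buffer; fall through to err and report level 0 */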
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3385 __func__); 3386 goto err; 3387 } 3388 3389 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3390 if (!status) { 3391 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 3392 sizeof(struct be_cmd_resp_hdr)); 3393 3394 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 3395 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 3396 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3397 } 3398 } 3399 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3400 extfat_cmd.dma); 3401 err: 3402 return level; 3403 } 3404 3405 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 3406 struct be_dma_mem *cmd) 3407 { 3408 struct be_mcc_wrb *wrb; 3409 struct be_cmd_req_get_ext_fat_caps *req; 3410 int status; 3411 3412 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3413 return -1; 3414 3415 wrb = wrb_from_mbox(adapter); 3416 if (!wrb) { 3417 status = -EBUSY; 3418 goto err; 3419 } 3420 3421 req = cmd->va; 3422 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3423 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 3424 cmd->size, wrb, cmd); 3425 req->parameter_type = cpu_to_le32(1); 3426 3427 status = be_mbox_notify_wait(adapter); 3428 err: 3429 mutex_unlock(&adapter->mbox_lock); 3430 return status; 3431 } 3432 3433 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 3434 struct be_dma_mem *cmd, 3435 struct be_fat_conf_params *configs) 3436 { 3437 struct be_mcc_wrb *wrb; 3438 struct be_cmd_req_set_ext_fat_caps *req; 3439 int status; 3440 3441 spin_lock_bh(&adapter->mcc_lock); 3442 3443 wrb = wrb_from_mccq(adapter); 3444 if (!wrb) { 3445 status = -EBUSY; 3446 goto err; 3447 } 3448 3449 req = cmd->va; 3450 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 3451 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3452 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 3453 cmd->size, wrb, cmd); 3454 3455 status = be_mcc_notify_wait(adapter); 3456 err: 3457 spin_unlock_bh(&adapter->mcc_lock); 3458 return status; 3459 } 3460 3461 int be_cmd_query_port_name(struct be_adapter *adapter) 3462 { 3463 struct be_cmd_req_get_port_name *req; 3464 struct be_mcc_wrb *wrb; 3465 int status; 3466 3467 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3468 return -1; 3469 3470 wrb = wrb_from_mbox(adapter); 3471 req = embedded_payload(wrb); 3472 3473 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3474 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3475 NULL); 3476 if (!BEx_chip(adapter)) 3477 req->hdr.version = 1; 3478 3479 status = be_mbox_notify_wait(adapter); 3480 if (!status) { 3481 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3482 3483 adapter->port_name = resp->port_name[adapter->hba_port_num]; 3484 } else { 3485 adapter->port_name = adapter->hba_port_num + '0'; 3486 } 3487 3488 mutex_unlock(&adapter->mbox_lock); 3489 return status; 3490 } 3491 3492 /* Descriptor type */ 3493 enum { 3494 FUNC_DESC = 1, 3495 VFT_DESC = 2 3496 }; 3497 3498 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 3499 int desc_type) 3500 { 3501 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3502 struct be_nic_res_desc *nic; 3503 int i; 3504 3505 for (i = 0; i < desc_count; i++) { 3506 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3507 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { 3508 nic = (struct be_nic_res_desc *)hdr; 3509 if (desc_type == FUNC_DESC || 3510 (desc_type == VFT_DESC && 3511 nic->flags & (1 << VFT_SHIFT))) 3512 return nic; 3513 } 
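		/* Step to the next descriptor; a zero desc_len from FW would
		 * never advance the walk, so fall back to the V0 size.
		 */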
3514 3515 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3516 hdr = (void *)hdr + hdr->desc_len; 3517 } 3518 return NULL; 3519 } 3520 3521 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) 3522 { 3523 return be_get_nic_desc(buf, desc_count, VFT_DESC); 3524 } 3525 3526 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) 3527 { 3528 return be_get_nic_desc(buf, desc_count, FUNC_DESC); 3529 } 3530 3531 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, 3532 u32 desc_count) 3533 { 3534 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3535 struct be_pcie_res_desc *pcie; 3536 int i; 3537 3538 for (i = 0; i < desc_count; i++) { 3539 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 3540 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { 3541 pcie = (struct be_pcie_res_desc *)hdr; 3542 if (pcie->pf_num == devfn) 3543 return pcie; 3544 } 3545 3546 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3547 hdr = (void *)hdr + hdr->desc_len; 3548 } 3549 return NULL; 3550 } 3551 3552 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 3553 { 3554 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3555 int i; 3556 3557 for (i = 0; i < desc_count; i++) { 3558 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 3559 return (struct be_port_res_desc *)hdr; 3560 3561 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3562 hdr = (void *)hdr + hdr->desc_len; 3563 } 3564 return NULL; 3565 } 3566 3567 static void be_copy_nic_desc(struct be_resources *res, 3568 struct be_nic_res_desc *desc) 3569 { 3570 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 3571 res->max_vlans = le16_to_cpu(desc->vlan_count); 3572 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 3573 res->max_tx_qs = le16_to_cpu(desc->txq_count); 3574 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 3575 res->max_rx_qs = le16_to_cpu(desc->rq_count); 3576 res->max_evt_qs = le16_to_cpu(desc->eq_count); 3577 res->max_cq_count = le16_to_cpu(desc->cq_count); 3578 res->max_iface_count = le16_to_cpu(desc->iface_count); 3579 res->max_mcc_count = le16_to_cpu(desc->mcc_count); 3580 /* Clear flags that driver is not interested in */ 3581 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 3582 BE_IF_CAP_FLAGS_WANT; 3583 } 3584 3585 /* Uses Mbox */ 3586 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 3587 { 3588 struct be_mcc_wrb *wrb; 3589 struct be_cmd_req_get_func_config *req; 3590 int status; 3591 struct be_dma_mem cmd; 3592 3593 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3594 return -1; 3595 3596 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3597 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3598 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3599 if (!cmd.va) { 3600 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3601 status = -ENOMEM; 3602 goto err; 3603 } 3604 3605 wrb = wrb_from_mbox(adapter); 3606 if (!wrb) { 3607 status = -EBUSY; 3608 goto err; 3609 } 3610 3611 req = cmd.va; 3612 3613 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3614 OPCODE_COMMON_GET_FUNC_CONFIG, 3615 cmd.size, wrb, &cmd); 3616 3617 if (skyhawk_chip(adapter)) 3618 req->hdr.version = 1; 3619 3620 status = be_mbox_notify_wait(adapter); 3621 if (!status) { 3622 struct be_cmd_resp_get_func_config *resp = cmd.va; 3623 u32 desc_count = le32_to_cpu(resp->desc_count); 3624 struct be_nic_res_desc *desc; 3625 3626 desc = be_get_func_nic_desc(resp->func_param, desc_count); 
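			/* A response without a NIC resource descriptor is
			 * unusable; fail the query.
			 */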
3627 if (!desc) { 3628 status = -EINVAL; 3629 goto err; 3630 } 3631 3632 adapter->pf_number = desc->pf_num; 3633 be_copy_nic_desc(res, desc); 3634 } 3635 err: 3636 mutex_unlock(&adapter->mbox_lock); 3637 if (cmd.va) 3638 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3639 return status; 3640 } 3641 3642 /* Will use MBOX only if MCCQ has not been created */ 3643 int be_cmd_get_profile_config(struct be_adapter *adapter, 3644 struct be_resources *res, u8 query, u8 domain) 3645 { 3646 struct be_cmd_resp_get_profile_config *resp; 3647 struct be_cmd_req_get_profile_config *req; 3648 struct be_nic_res_desc *vf_res; 3649 struct be_pcie_res_desc *pcie; 3650 struct be_port_res_desc *port; 3651 struct be_nic_res_desc *nic; 3652 struct be_mcc_wrb wrb = {0}; 3653 struct be_dma_mem cmd; 3654 u16 desc_count; 3655 int status; 3656 3657 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3658 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3659 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3660 if (!cmd.va) 3661 return -ENOMEM; 3662 3663 req = cmd.va; 3664 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3665 OPCODE_COMMON_GET_PROFILE_CONFIG, 3666 cmd.size, &wrb, &cmd); 3667 3668 req->hdr.domain = domain; 3669 if (!lancer_chip(adapter)) 3670 req->hdr.version = 1; 3671 req->type = ACTIVE_PROFILE_TYPE; 3672 3673 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 3674 * descriptors with all bits set to "1" for the fields which can be 3675 * modified using SET_PROFILE_CONFIG cmd. 3676 */ 3677 if (query == RESOURCE_MODIFIABLE) 3678 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; 3679 3680 status = be_cmd_notify_wait(adapter, &wrb); 3681 if (status) 3682 goto err; 3683 3684 resp = cmd.va; 3685 desc_count = le16_to_cpu(resp->desc_count); 3686 3687 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3688 desc_count); 3689 if (pcie) 3690 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3691 3692 port = be_get_port_desc(resp->func_param, desc_count); 3693 if (port) 3694 adapter->mc_type = port->mc_type; 3695 3696 nic = be_get_func_nic_desc(resp->func_param, desc_count); 3697 if (nic) 3698 be_copy_nic_desc(res, nic); 3699 3700 vf_res = be_get_vft_desc(resp->func_param, desc_count); 3701 if (vf_res) 3702 res->vf_if_cap_flags = vf_res->cap_flags; 3703 err: 3704 if (cmd.va) 3705 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3706 return status; 3707 } 3708 3709 /* Will use MBOX only if MCCQ has not been created */ 3710 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 3711 int size, int count, u8 version, u8 domain) 3712 { 3713 struct be_cmd_req_set_profile_config *req; 3714 struct be_mcc_wrb wrb = {0}; 3715 struct be_dma_mem cmd; 3716 int status; 3717 3718 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3719 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3720 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3721 if (!cmd.va) 3722 return -ENOMEM; 3723 3724 req = cmd.va; 3725 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3726 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, 3727 &wrb, &cmd); 3728 req->hdr.version = version; 3729 req->hdr.domain = domain; 3730 req->desc_count = cpu_to_le32(count); 3731 memcpy(req->desc, desc, size); 3732 3733 status = be_cmd_notify_wait(adapter, &wrb); 3734 3735 if (cmd.va) 3736 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3737 return status; 3738 } 3739 3740 /* Mark all fields invalid */ 3741 static void be_reset_nic_desc(struct 
be_nic_res_desc *nic) 3742 { 3743 memset(nic, 0, sizeof(*nic)); 3744 nic->unicast_mac_count = 0xFFFF; 3745 nic->mcc_count = 0xFFFF; 3746 nic->vlan_count = 0xFFFF; 3747 nic->mcast_mac_count = 0xFFFF; 3748 nic->txq_count = 0xFFFF; 3749 nic->rq_count = 0xFFFF; 3750 nic->rssq_count = 0xFFFF; 3751 nic->lro_count = 0xFFFF; 3752 nic->cq_count = 0xFFFF; 3753 nic->toe_conn_count = 0xFFFF; 3754 nic->eq_count = 0xFFFF; 3755 nic->iface_count = 0xFFFF; 3756 nic->link_param = 0xFF; 3757 nic->channel_id_param = cpu_to_le16(0xF000); 3758 nic->acpi_params = 0xFF; 3759 nic->wol_param = 0x0F; 3760 nic->tunnel_iface_count = 0xFFFF; 3761 nic->direct_tenant_iface_count = 0xFFFF; 3762 nic->bw_min = 0xFFFFFFFF; 3763 nic->bw_max = 0xFFFFFFFF; 3764 } 3765 3766 /* Mark all fields invalid */ 3767 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) 3768 { 3769 memset(pcie, 0, sizeof(*pcie)); 3770 pcie->sriov_state = 0xFF; 3771 pcie->pf_state = 0xFF; 3772 pcie->pf_type = 0xFF; 3773 pcie->num_vfs = 0xFFFF; 3774 } 3775 3776 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 3777 u8 domain) 3778 { 3779 struct be_nic_res_desc nic_desc; 3780 u32 bw_percent; 3781 u16 version = 0; 3782 3783 if (BE3_chip(adapter)) 3784 return be_cmd_set_qos(adapter, max_rate / 10, domain); 3785 3786 be_reset_nic_desc(&nic_desc); 3787 nic_desc.pf_num = adapter->pf_number; 3788 nic_desc.vf_num = domain; 3789 nic_desc.bw_min = 0; 3790 if (lancer_chip(adapter)) { 3791 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3792 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3793 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3794 (1 << NOSV_SHIFT); 3795 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 3796 } else { 3797 version = 1; 3798 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3799 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3800 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3801 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 3802 nic_desc.bw_max = cpu_to_le32(bw_percent); 3803 } 3804 3805 return be_cmd_set_profile_config(adapter, &nic_desc, 3806 nic_desc.hdr.desc_len, 3807 1, version, domain); 3808 } 3809 3810 static void be_fill_vf_res_template(struct be_adapter *adapter, 3811 struct be_resources pool_res, 3812 u16 num_vfs, u16 num_vf_qs, 3813 struct be_nic_res_desc *nic_vft) 3814 { 3815 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; 3816 struct be_resources res_mod = {0}; 3817 3818 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, 3819 * which are modifiable using SET_PROFILE_CONFIG cmd. 3820 */ 3821 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); 3822 3823 /* If RSS IFACE capability flags are modifiable for a VF, set the 3824 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if 3825 * more than 1 RSSQ is available for a VF. 3826 * Otherwise, provision only 1 queue pair for VF. 
3827 */ 3828 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { 3829 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); 3830 if (num_vf_qs > 1) { 3831 vf_if_cap_flags |= BE_IF_FLAGS_RSS; 3832 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) 3833 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; 3834 } else { 3835 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | 3836 BE_IF_FLAGS_DEFQ_RSS); 3837 } 3838 3839 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); 3840 } else { 3841 num_vf_qs = 1; 3842 } 3843 3844 nic_vft->rq_count = cpu_to_le16(num_vf_qs); 3845 nic_vft->txq_count = cpu_to_le16(num_vf_qs); 3846 nic_vft->rssq_count = cpu_to_le16(num_vf_qs); 3847 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / 3848 (num_vfs + 1)); 3849 3850 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally 3851 * among the PF and its VFs, if the fields are changeable 3852 */ 3853 if (res_mod.max_uc_mac == FIELD_MODIFIABLE) 3854 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / 3855 (num_vfs + 1)); 3856 3857 if (res_mod.max_vlans == FIELD_MODIFIABLE) 3858 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / 3859 (num_vfs + 1)); 3860 3861 if (res_mod.max_iface_count == FIELD_MODIFIABLE) 3862 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / 3863 (num_vfs + 1)); 3864 3865 if (res_mod.max_mcc_count == FIELD_MODIFIABLE) 3866 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / 3867 (num_vfs + 1)); 3868 } 3869 3870 int be_cmd_set_sriov_config(struct be_adapter *adapter, 3871 struct be_resources pool_res, u16 num_vfs, 3872 u16 num_vf_qs) 3873 { 3874 struct { 3875 struct be_pcie_res_desc pcie; 3876 struct be_nic_res_desc nic_vft; 3877 } __packed desc; 3878 3879 /* PF PCIE descriptor */ 3880 be_reset_pcie_desc(&desc.pcie); 3881 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; 3882 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3883 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3884 desc.pcie.pf_num = adapter->pdev->devfn; 3885 desc.pcie.sriov_state = num_vfs ?
1 : 0; 3886 desc.pcie.num_vfs = cpu_to_le16(num_vfs); 3887 3888 /* VF NIC Template descriptor */ 3889 be_reset_nic_desc(&desc.nic_vft); 3890 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3891 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3892 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3893 desc.nic_vft.pf_num = adapter->pdev->devfn; 3894 desc.nic_vft.vf_num = 0; 3895 3896 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, 3897 &desc.nic_vft); 3898 3899 return be_cmd_set_profile_config(adapter, &desc, 3900 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 3901 } 3902 3903 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3904 { 3905 struct be_mcc_wrb *wrb; 3906 struct be_cmd_req_manage_iface_filters *req; 3907 int status; 3908 3909 if (iface == 0xFFFFFFFF) 3910 return -1; 3911 3912 spin_lock_bh(&adapter->mcc_lock); 3913 3914 wrb = wrb_from_mccq(adapter); 3915 if (!wrb) { 3916 status = -EBUSY; 3917 goto err; 3918 } 3919 req = embedded_payload(wrb); 3920 3921 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3922 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 3923 wrb, NULL); 3924 req->op = op; 3925 req->target_iface_id = cpu_to_le32(iface); 3926 3927 status = be_mcc_notify_wait(adapter); 3928 err: 3929 spin_unlock_bh(&adapter->mcc_lock); 3930 return status; 3931 } 3932 3933 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 3934 { 3935 struct be_port_res_desc port_desc; 3936 3937 memset(&port_desc, 0, sizeof(port_desc)); 3938 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 3939 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3940 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3941 port_desc.link_num = adapter->hba_port_num; 3942 if (port) { 3943 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 3944 (1 << RCVID_SHIFT); 3945 port_desc.nv_port = swab16(port); 3946 } else { 3947 port_desc.nv_flags = NV_TYPE_DISABLED; 3948 port_desc.nv_port = 0; 3949 } 3950 3951 return be_cmd_set_profile_config(adapter, &port_desc, 3952 RESOURCE_DESC_SIZE_V1, 1, 1, 0); 3953 } 3954 3955 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 3956 int vf_num) 3957 { 3958 struct be_mcc_wrb *wrb; 3959 struct be_cmd_req_get_iface_list *req; 3960 struct be_cmd_resp_get_iface_list *resp; 3961 int status; 3962 3963 spin_lock_bh(&adapter->mcc_lock); 3964 3965 wrb = wrb_from_mccq(adapter); 3966 if (!wrb) { 3967 status = -EBUSY; 3968 goto err; 3969 } 3970 req = embedded_payload(wrb); 3971 3972 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3973 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 3974 wrb, NULL); 3975 req->hdr.domain = vf_num + 1; 3976 3977 status = be_mcc_notify_wait(adapter); 3978 if (!status) { 3979 resp = (struct be_cmd_resp_get_iface_list *)req; 3980 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 3981 } 3982 3983 err: 3984 spin_unlock_bh(&adapter->mcc_lock); 3985 return status; 3986 } 3987 3988 static int lancer_wait_idle(struct be_adapter *adapter) 3989 { 3990 #define SLIPORT_IDLE_TIMEOUT 30 3991 u32 reg_val; 3992 int status = 0, i; 3993 3994 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 3995 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 3996 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 3997 break; 3998 3999 ssleep(1); 4000 } 4001 4002 if (i == SLIPORT_IDLE_TIMEOUT) 4003 status = -1; 4004 4005 return status; 4006 } 4007 4008 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 4009 { 4010 int status = 0; 4011 4012 status = 
lancer_wait_idle(adapter); 4013 if (status) 4014 return status; 4015 4016 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 4017 4018 return status; 4019 } 4020 4021 /* Routine to check whether dump image is present or not */ 4022 bool dump_present(struct be_adapter *adapter) 4023 { 4024 u32 sliport_status = 0; 4025 4026 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 4027 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 4028 } 4029 4030 int lancer_initiate_dump(struct be_adapter *adapter) 4031 { 4032 struct device *dev = &adapter->pdev->dev; 4033 int status; 4034 4035 if (dump_present(adapter)) { 4036 dev_info(dev, "Previous dump not cleared, not forcing dump\n"); 4037 return -EEXIST; 4038 } 4039 4040 /* give firmware reset and diagnostic dump */ 4041 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 4042 PHYSDEV_CONTROL_DD_MASK); 4043 if (status < 0) { 4044 dev_err(dev, "FW reset failed\n"); 4045 return status; 4046 } 4047 4048 status = lancer_wait_idle(adapter); 4049 if (status) 4050 return status; 4051 4052 if (!dump_present(adapter)) { 4053 dev_err(dev, "FW dump not generated\n"); 4054 return -EIO; 4055 } 4056 4057 return 0; 4058 } 4059 4060 int lancer_delete_dump(struct be_adapter *adapter) 4061 { 4062 int status; 4063 4064 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); 4065 return be_cmd_status(status); 4066 } 4067 4068 /* Uses sync mcc */ 4069 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 4070 { 4071 struct be_mcc_wrb *wrb; 4072 struct be_cmd_enable_disable_vf *req; 4073 int status; 4074 4075 if (BEx_chip(adapter)) 4076 return 0; 4077 4078 spin_lock_bh(&adapter->mcc_lock); 4079 4080 wrb = wrb_from_mccq(adapter); 4081 if (!wrb) { 4082 status = -EBUSY; 4083 goto err; 4084 } 4085 4086 req = embedded_payload(wrb); 4087 4088 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4089 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 4090 wrb, NULL); 4091 4092 req->hdr.domain = domain; 4093 req->enable = 1; 4094 status = be_mcc_notify_wait(adapter); 4095 err: 4096 spin_unlock_bh(&adapter->mcc_lock); 4097 return status; 4098 } 4099 4100 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 4101 { 4102 struct be_mcc_wrb *wrb; 4103 struct be_cmd_req_intr_set *req; 4104 int status; 4105 4106 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4107 return -1; 4108 4109 wrb = wrb_from_mbox(adapter); 4110 4111 req = embedded_payload(wrb); 4112 4113 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4114 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 4115 wrb, NULL); 4116 4117 req->intr_enabled = intr_enable; 4118 4119 status = be_mbox_notify_wait(adapter); 4120 4121 mutex_unlock(&adapter->mbox_lock); 4122 return status; 4123 } 4124 4125 /* Uses MBOX */ 4126 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 4127 { 4128 struct be_cmd_req_get_active_profile *req; 4129 struct be_mcc_wrb *wrb; 4130 int status; 4131 4132 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4133 return -1; 4134 4135 wrb = wrb_from_mbox(adapter); 4136 if (!wrb) { 4137 status = -EBUSY; 4138 goto err; 4139 } 4140 4141 req = embedded_payload(wrb); 4142 4143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4144 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 4145 wrb, NULL); 4146 4147 status = be_mbox_notify_wait(adapter); 4148 if (!status) { 4149 struct be_cmd_resp_get_active_profile *resp = 4150 embedded_payload(wrb); 4151 4152 *profile_id = le16_to_cpu(resp->active_profile_id); 4153 } 4154 4155 
err: 4156 mutex_unlock(&adapter->mbox_lock); 4157 return status; 4158 } 4159 4160 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 4161 int link_state, u8 domain) 4162 { 4163 struct be_mcc_wrb *wrb; 4164 struct be_cmd_req_set_ll_link *req; 4165 int status; 4166 4167 if (BEx_chip(adapter) || lancer_chip(adapter)) 4168 return -EOPNOTSUPP; 4169 4170 spin_lock_bh(&adapter->mcc_lock); 4171 4172 wrb = wrb_from_mccq(adapter); 4173 if (!wrb) { 4174 status = -EBUSY; 4175 goto err; 4176 } 4177 4178 req = embedded_payload(wrb); 4179 4180 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4181 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 4182 sizeof(*req), wrb, NULL); 4183 4184 req->hdr.version = 1; 4185 req->hdr.domain = domain; 4186 4187 if (link_state == IFLA_VF_LINK_STATE_ENABLE) 4188 req->link_config |= 1; 4189 4190 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4191 req->link_config |= 1 << PLINK_TRACK_SHIFT; 4192 4193 status = be_mcc_notify_wait(adapter); 4194 err: 4195 spin_unlock_bh(&adapter->mcc_lock); 4196 return status; 4197 } 4198 4199 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 4200 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 4201 { 4202 struct be_adapter *adapter = netdev_priv(netdev_handle); 4203 struct be_mcc_wrb *wrb; 4204 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; 4205 struct be_cmd_req_hdr *req; 4206 struct be_cmd_resp_hdr *resp; 4207 int status; 4208 4209 spin_lock_bh(&adapter->mcc_lock); 4210 4211 wrb = wrb_from_mccq(adapter); 4212 if (!wrb) { 4213 status = -EBUSY; 4214 goto err; 4215 } 4216 req = embedded_payload(wrb); 4217 resp = embedded_payload(wrb); 4218 4219 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 4220 hdr->opcode, wrb_payload_size, wrb, NULL); 4221 memcpy(req, wrb_payload, wrb_payload_size); 4222 be_dws_cpu_to_le(req, wrb_payload_size); 4223 4224 status = be_mcc_notify_wait(adapter); 4225 if (cmd_status) 4226 *cmd_status = (status & 0xffff); 4227 if (ext_status) 4228 *ext_status = 0; 4229 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 4230 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 4231 err: 4232 spin_unlock_bh(&adapter->mcc_lock); 4233 return status; 4234 } 4235 EXPORT_SYMBOL(be_roce_mcc_cmd); 4236
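/* Illustrative sketch (not part of this file): be_roce_mcc_cmd() is exported
 * so that an upper-layer RoCE driver can tunnel raw MCC commands through the
 * NIC function. A caller shaped like this hypothetical helper would place a
 * filled-in struct be_cmd_req_hdr at the start of its payload and check both
 * the kernel status and the returned MCC completion status.
 */
static inline int example_roce_mcc_call(struct net_device *netdev,
					void *payload, int payload_len)
{
	u16 cmd_status = 0, ext_status = 0;
	int status;

	status = be_roce_mcc_cmd(netdev, payload, payload_len,
				 &cmd_status, &ext_status);
	if (status)
		return status;		/* MCCQ busy or adapter error */

	return cmd_status ? -EIO : 0;	/* non-zero MCC status => failure */
}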