/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}
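
/* Note on the tag scheme: fill_wrb_tags() further down stashes the
 * virtual address of the request header in the WRB's tag0 (low 32 bits)
 * and tag1 (high 32 bits); be_decode_resp_hdr() reverses that split.
 * The ((addr << 16) << 16) form appears to be deliberate: a single
 * shift by 32 would be undefined on 32-bit builds where unsigned long
 * is 32 bits wide, while the double shift simply yields zero there.
 */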

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		/* OR in the new priority bits; a plain assignment here would
		 * make the mask-clear above dead code
		 */
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
		     ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
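
/* Note: with mcc_timeout at 120000 iterations and a udelay(100) per
 * iteration, be_mcc_wait_compl() below polls for roughly
 * 120000 * 100us = 12s before declaring the FW unresponsive.
 */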

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
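
/* Note on the doorbell protocol above: the first write publishes dma
 * address bits 34-63 (in doorbell bits 2-31, with the HI flag set) and
 * the second publishes bits 4-33. Bits 0-3 of the address are never
 * sent, so the mailbox DMA buffer is evidently assumed to be at least
 * 16-byte aligned.
 */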

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
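
/* Note: the driver has two paths for issuing cmds to the FW. The
 * bootstrap mailbox (wrb_from_mbox above, serialized by the mbox_lock
 * mutex) is used before the MCC queue exists; once the MCCQ is created
 * (q.created), cmds go through wrb_from_mccq below under the mcc_lock
 * spinlock. be_cmd_lock()/use_mcc() further down encode this choice.
 */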

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		/* Must release the lock taken above; returning directly
		 * here would leak it
		 */
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
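
/* Note: the *_create cmds below encode ring sizes as a log2 of
 * 256-entry units, e.g. __ilog2_u32(eqo->q.len / 256) in
 * be_cmd_eq_create(): a 1024-entry EQ yields 1024/256 = 4, encoded as
 * log2(4) = 2. This appears to assume ring lengths that are powers of
 * two and multiples of 256.
 */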

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
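
/* Note: for a power-of-two ring length, fls(q_len) == log2(q_len) + 1;
 * e.g. a 128-entry MCCQ encodes as fls(128) = 8. The value 16 (i.e. a
 * 32768-entry ring) wraps to the encoding 0 per the above.
 */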

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Group 5, QnQ and Sliport async events */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
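
/* Note: req->frag_size above is sent as a log2: for a power-of-two
 * frag_size, fls(frag_size) - 1 == log2(frag_size), e.g. 2048-byte
 * fragments encode as 11.
 */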

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && !be_physfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* version 0 of the cmd is supported only by BE2; BE3 and Lancer
	 * use v1 and the rest (Skyhawk) use v2. An if/if/else chain here
	 * would let the final else override BE2's version back to 2.
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}
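
/* Note: in the query below the FW's link_speed field is apparently
 * reported in units of 10 Mbps (hence the * 10); when that field is
 * zero the driver falls back to decoding the PHY_LINK_SPEED_* enum via
 * be_mac_to_link_speed() above.
 */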

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses async mcc; the completion is consumed by be_async_cmd_process() */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
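
/* Note: the FAT log below is fetched in chunks of at most 60KB per
 * MCC cmd; read_log_offset starts past the per-log u32 header, matching
 * be_cmd_get_reg_len() above, which likewise subtracts sizeof(u32).
 */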

int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
	    log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return -EIO;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60 * 1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev,
				"FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	if (lancer_chip(adapter) && num > 8) {
		while (num) {
			num_eqs = min(num, 8);
			__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
	} else {
		__be_cmd_modify_eqd(adapter, set_eqd, num);
	}

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ?
			1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;
		int i = 0;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);

	return __be_cmd_rx_filter(adapter, flags, value);
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
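
/* Note: be_cmd_rss_config() below sends the indirection table size as
 * a log2 (fls(table_size) - 1), e.g. a 64-entry table encodes as 6;
 * table_size is assumed to be a power of two. The hash key is always
 * RSS_HASH_KEY_LEN bytes.
 */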
return status; 2138 } 2139 2140 /* Uses sync mcc */ 2141 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2142 u8 bcn, u8 sts, u8 state) 2143 { 2144 struct be_mcc_wrb *wrb; 2145 struct be_cmd_req_enable_disable_beacon *req; 2146 int status; 2147 2148 spin_lock_bh(&adapter->mcc_lock); 2149 2150 wrb = wrb_from_mccq(adapter); 2151 if (!wrb) { 2152 status = -EBUSY; 2153 goto err; 2154 } 2155 req = embedded_payload(wrb); 2156 2157 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2158 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2159 sizeof(*req), wrb, NULL); 2160 2161 req->port_num = port_num; 2162 req->beacon_state = state; 2163 req->beacon_duration = bcn; 2164 req->status_duration = sts; 2165 2166 status = be_mcc_notify_wait(adapter); 2167 2168 err: 2169 spin_unlock_bh(&adapter->mcc_lock); 2170 return status; 2171 } 2172 2173 /* Uses sync mcc */ 2174 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2175 { 2176 struct be_mcc_wrb *wrb; 2177 struct be_cmd_req_get_beacon_state *req; 2178 int status; 2179 2180 spin_lock_bh(&adapter->mcc_lock); 2181 2182 wrb = wrb_from_mccq(adapter); 2183 if (!wrb) { 2184 status = -EBUSY; 2185 goto err; 2186 } 2187 req = embedded_payload(wrb); 2188 2189 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2190 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2191 wrb, NULL); 2192 2193 req->port_num = port_num; 2194 2195 status = be_mcc_notify_wait(adapter); 2196 if (!status) { 2197 struct be_cmd_resp_get_beacon_state *resp = 2198 embedded_payload(wrb); 2199 2200 *state = resp->beacon_state; 2201 } 2202 2203 err: 2204 spin_unlock_bh(&adapter->mcc_lock); 2205 return status; 2206 } 2207 2208 /* Uses sync mcc */ 2209 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, 2210 u8 page_num, u8 *data) 2211 { 2212 struct be_dma_mem cmd; 2213 struct be_mcc_wrb *wrb; 2214 struct be_cmd_req_port_type *req; 2215 int status; 2216 2217 if (page_num > TR_PAGE_A2) 2218 return -EINVAL; 2219 2220 cmd.size = sizeof(struct be_cmd_resp_port_type); 2221 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2222 if (!cmd.va) { 2223 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2224 return -ENOMEM; 2225 } 2226 memset(cmd.va, 0, cmd.size); 2227 2228 spin_lock_bh(&adapter->mcc_lock); 2229 2230 wrb = wrb_from_mccq(adapter); 2231 if (!wrb) { 2232 status = -EBUSY; 2233 goto err; 2234 } 2235 req = cmd.va; 2236 2237 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2238 OPCODE_COMMON_READ_TRANSRECV_DATA, 2239 cmd.size, wrb, &cmd); 2240 2241 req->port = cpu_to_le32(adapter->hba_port_num); 2242 req->page_num = cpu_to_le32(page_num); 2243 status = be_mcc_notify_wait(adapter); 2244 if (!status) { 2245 struct be_cmd_resp_port_type *resp = cmd.va; 2246 2247 memcpy(data, resp->page_data, PAGE_DATA_LEN); 2248 } 2249 err: 2250 spin_unlock_bh(&adapter->mcc_lock); 2251 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2252 return status; 2253 } 2254 2255 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2256 u32 data_size, u32 data_offset, 2257 const char *obj_name, u32 *data_written, 2258 u8 *change_status, u8 *addn_status) 2259 { 2260 struct be_mcc_wrb *wrb; 2261 struct lancer_cmd_req_write_object *req; 2262 struct lancer_cmd_resp_write_object *resp; 2263 void *ctxt = NULL; 2264 int status; 2265 2266 spin_lock_bh(&adapter->mcc_lock); 2267 adapter->flash_status = 0; 2268 2269 wrb = wrb_from_mccq(adapter); 2270 if (!wrb) { 2271 status = -EBUSY; 2272 goto err_unlock; 2273 
} 2274 2275 req = embedded_payload(wrb); 2276 2277 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2278 OPCODE_COMMON_WRITE_OBJECT, 2279 sizeof(struct lancer_cmd_req_write_object), wrb, 2280 NULL); 2281 2282 ctxt = &req->context; 2283 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2284 write_length, ctxt, data_size); 2285 2286 if (data_size == 0) 2287 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2288 eof, ctxt, 1); 2289 else 2290 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2291 eof, ctxt, 0); 2292 2293 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2294 req->write_offset = cpu_to_le32(data_offset); 2295 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2296 req->descriptor_count = cpu_to_le32(1); 2297 req->buf_len = cpu_to_le32(data_size); 2298 req->addr_low = cpu_to_le32((cmd->dma + 2299 sizeof(struct lancer_cmd_req_write_object)) 2300 & 0xFFFFFFFF); 2301 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2302 sizeof(struct lancer_cmd_req_write_object))); 2303 2304 be_mcc_notify(adapter); 2305 spin_unlock_bh(&adapter->mcc_lock); 2306 2307 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2308 msecs_to_jiffies(60000))) 2309 status = -ETIMEDOUT; 2310 else 2311 status = adapter->flash_status; 2312 2313 resp = embedded_payload(wrb); 2314 if (!status) { 2315 *data_written = le32_to_cpu(resp->actual_write_len); 2316 *change_status = resp->change_status; 2317 } else { 2318 *addn_status = resp->additional_status; 2319 } 2320 2321 return status; 2322 2323 err_unlock: 2324 spin_unlock_bh(&adapter->mcc_lock); 2325 return status; 2326 } 2327 2328 int be_cmd_query_cable_type(struct be_adapter *adapter) 2329 { 2330 u8 page_data[PAGE_DATA_LEN]; 2331 int status; 2332 2333 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2334 page_data); 2335 if (!status) { 2336 switch (adapter->phy.interface_type) { 2337 case PHY_TYPE_QSFP: 2338 adapter->phy.cable_type = 2339 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; 2340 break; 2341 case PHY_TYPE_SFP_PLUS_10GB: 2342 adapter->phy.cable_type = 2343 page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; 2344 break; 2345 default: 2346 adapter->phy.cable_type = 0; 2347 break; 2348 } 2349 } 2350 return status; 2351 } 2352 2353 int be_cmd_query_sfp_info(struct be_adapter *adapter) 2354 { 2355 u8 page_data[PAGE_DATA_LEN]; 2356 int status; 2357 2358 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2359 page_data); 2360 if (!status) { 2361 strlcpy(adapter->phy.vendor_name, page_data + 2362 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); 2363 strlcpy(adapter->phy.vendor_pn, 2364 page_data + SFP_VENDOR_PN_OFFSET, 2365 SFP_VENDOR_NAME_LEN - 1); 2366 } 2367 2368 return status; 2369 } 2370 2371 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) 2372 { 2373 struct lancer_cmd_req_delete_object *req; 2374 struct be_mcc_wrb *wrb; 2375 int status; 2376 2377 spin_lock_bh(&adapter->mcc_lock); 2378 2379 wrb = wrb_from_mccq(adapter); 2380 if (!wrb) { 2381 status = -EBUSY; 2382 goto err; 2383 } 2384 2385 req = embedded_payload(wrb); 2386 2387 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2388 OPCODE_COMMON_DELETE_OBJECT, 2389 sizeof(*req), wrb, NULL); 2390 2391 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2392 2393 status = be_mcc_notify_wait(adapter); 2394 err: 2395 spin_unlock_bh(&adapter->mcc_lock); 2396 return status; 2397 } 2398 2399 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2400 u32 data_size, u32 data_offset, const char *obj_name, 
2401 u32 *data_read, u32 *eof, u8 *addn_status) 2402 { 2403 struct be_mcc_wrb *wrb; 2404 struct lancer_cmd_req_read_object *req; 2405 struct lancer_cmd_resp_read_object *resp; 2406 int status; 2407 2408 spin_lock_bh(&adapter->mcc_lock); 2409 2410 wrb = wrb_from_mccq(adapter); 2411 if (!wrb) { 2412 status = -EBUSY; 2413 goto err_unlock; 2414 } 2415 2416 req = embedded_payload(wrb); 2417 2418 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2419 OPCODE_COMMON_READ_OBJECT, 2420 sizeof(struct lancer_cmd_req_read_object), wrb, 2421 NULL); 2422 2423 req->desired_read_len = cpu_to_le32(data_size); 2424 req->read_offset = cpu_to_le32(data_offset); 2425 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2426 req->descriptor_count = cpu_to_le32(1); 2427 req->buf_len = cpu_to_le32(data_size); 2428 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2429 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2430 2431 status = be_mcc_notify_wait(adapter); 2432 2433 resp = embedded_payload(wrb); 2434 if (!status) { 2435 *data_read = le32_to_cpu(resp->actual_read_len); 2436 *eof = le32_to_cpu(resp->eof); 2437 } else { 2438 *addn_status = resp->additional_status; 2439 } 2440 2441 err_unlock: 2442 spin_unlock_bh(&adapter->mcc_lock); 2443 return status; 2444 } 2445 2446 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2447 u32 flash_type, u32 flash_opcode, u32 img_offset, 2448 u32 buf_size) 2449 { 2450 struct be_mcc_wrb *wrb; 2451 struct be_cmd_write_flashrom *req; 2452 int status; 2453 2454 spin_lock_bh(&adapter->mcc_lock); 2455 adapter->flash_status = 0; 2456 2457 wrb = wrb_from_mccq(adapter); 2458 if (!wrb) { 2459 status = -EBUSY; 2460 goto err_unlock; 2461 } 2462 req = cmd->va; 2463 2464 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2465 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2466 cmd); 2467 2468 req->params.op_type = cpu_to_le32(flash_type); 2469 if (flash_type == OPTYPE_OFFSET_SPECIFIED) 2470 req->params.offset = cpu_to_le32(img_offset); 2471 2472 req->params.op_code = cpu_to_le32(flash_opcode); 2473 req->params.data_buf_size = cpu_to_le32(buf_size); 2474 2475 be_mcc_notify(adapter); 2476 spin_unlock_bh(&adapter->mcc_lock); 2477 2478 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2479 msecs_to_jiffies(40000))) 2480 status = -ETIMEDOUT; 2481 else 2482 status = adapter->flash_status; 2483 2484 return status; 2485 2486 err_unlock: 2487 spin_unlock_bh(&adapter->mcc_lock); 2488 return status; 2489 } 2490 2491 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2492 u16 img_optype, u32 img_offset, u32 crc_offset) 2493 { 2494 struct be_cmd_read_flash_crc *req; 2495 struct be_mcc_wrb *wrb; 2496 int status; 2497 2498 spin_lock_bh(&adapter->mcc_lock); 2499 2500 wrb = wrb_from_mccq(adapter); 2501 if (!wrb) { 2502 status = -EBUSY; 2503 goto err; 2504 } 2505 req = embedded_payload(wrb); 2506 2507 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2508 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2509 wrb, NULL); 2510 2511 req->params.op_type = cpu_to_le32(img_optype); 2512 if (img_optype == OPTYPE_OFFSET_SPECIFIED) 2513 req->params.offset = cpu_to_le32(img_offset + crc_offset); 2514 else 2515 req->params.offset = cpu_to_le32(crc_offset); 2516 2517 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2518 req->params.data_buf_size = cpu_to_le32(0x4); 2519 2520 status = be_mcc_notify_wait(adapter); 2521 if (!status) 2522 memcpy(flashed_crc, req->crc, 4); 2523 2524 err: 2525 spin_unlock_bh(&adapter->mcc_lock); 2526 return status; 2527 } 2528
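/* Uses sync mcc; the magic-packet MAC filter is programmed via a caller-supplied non-embedded (DMA) command buffer */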
2529 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2530 struct be_dma_mem *nonemb_cmd) 2531 { 2532 struct be_mcc_wrb *wrb; 2533 struct be_cmd_req_acpi_wol_magic_config *req; 2534 int status; 2535 2536 spin_lock_bh(&adapter->mcc_lock); 2537 2538 wrb = wrb_from_mccq(adapter); 2539 if (!wrb) { 2540 status = -EBUSY; 2541 goto err; 2542 } 2543 req = nonemb_cmd->va; 2544 2545 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2546 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2547 wrb, nonemb_cmd); 2548 memcpy(req->magic_mac, mac, ETH_ALEN); 2549 2550 status = be_mcc_notify_wait(adapter); 2551 2552 err: 2553 spin_unlock_bh(&adapter->mcc_lock); 2554 return status; 2555 } 2556 2557 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2558 u8 loopback_type, u8 enable) 2559 { 2560 struct be_mcc_wrb *wrb; 2561 struct be_cmd_req_set_lmode *req; 2562 int status; 2563 2564 spin_lock_bh(&adapter->mcc_lock); 2565 2566 wrb = wrb_from_mccq(adapter); 2567 if (!wrb) { 2568 status = -EBUSY; 2569 goto err; 2570 } 2571 2572 req = embedded_payload(wrb); 2573 2574 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2575 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2576 wrb, NULL); 2577 2578 req->src_port = port_num; 2579 req->dest_port = port_num; 2580 req->loopback_type = loopback_type; 2581 req->loopback_state = enable; 2582 2583 status = be_mcc_notify_wait(adapter); 2584 err: 2585 spin_unlock_bh(&adapter->mcc_lock); 2586 return status; 2587 } 2588 2589 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2590 u32 loopback_type, u32 pkt_size, u32 num_pkts, 2591 u64 pattern) 2592 { 2593 struct be_mcc_wrb *wrb; 2594 struct be_cmd_req_loopback_test *req; 2595 struct be_cmd_resp_loopback_test *resp; 2596 int status; 2597 2598 spin_lock_bh(&adapter->mcc_lock); 2599 2600 wrb = wrb_from_mccq(adapter); 2601 if (!wrb) { 2602 status = -EBUSY; 2603 goto err; 2604 } 2605 2606 req = embedded_payload(wrb); 2607 2608 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2609 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2610 NULL); 2611 2612 req->hdr.timeout = cpu_to_le32(15); 2613 req->pattern = cpu_to_le64(pattern); 2614 req->src_port = cpu_to_le32(port_num); 2615 req->dest_port = cpu_to_le32(port_num); 2616 req->pkt_size = cpu_to_le32(pkt_size); 2617 req->num_pkts = cpu_to_le32(num_pkts); 2618 req->loopback_type = cpu_to_le32(loopback_type); 2619 2620 be_mcc_notify(adapter); 2621 2622 spin_unlock_bh(&adapter->mcc_lock); 2623 2624 wait_for_completion(&adapter->et_cmd_compl); 2625 resp = embedded_payload(wrb); 2626 status = le32_to_cpu(resp->status); 2627 2628 return status; 2629 err: 2630 spin_unlock_bh(&adapter->mcc_lock); 2631 return status; 2632 } 2633 2634 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2635 u32 byte_cnt, struct be_dma_mem *cmd) 2636 { 2637 struct be_mcc_wrb *wrb; 2638 struct be_cmd_req_ddrdma_test *req; 2639 int status; 2640 int i, j = 0; 2641 2642 spin_lock_bh(&adapter->mcc_lock); 2643 2644 wrb = wrb_from_mccq(adapter); 2645 if (!wrb) { 2646 status = -EBUSY; 2647 goto err; 2648 } 2649 req = cmd->va; 2650 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2651 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2652 cmd); 2653 2654 req->pattern = cpu_to_le64(pattern); 2655 req->byte_count = cpu_to_le32(byte_cnt); 2656 for (i = 0; i < byte_cnt; i++) { 2657 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2658 j++; 2659 if (j > 7) 2660 j = 0; 2661 } 2662 2663 status = be_mcc_notify_wait(adapter); 2664 2665 if (!status) { 2666 
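/* Verify the DMA round trip: the buffer echoed back by the adapter must match the transmitted pattern and no send error may be flagged */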
struct be_cmd_resp_ddrdma_test *resp; 2667 2668 resp = cmd->va; 2669 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2670 resp->snd_err) { 2671 status = -1; 2672 } 2673 } 2674 2675 err: 2676 spin_unlock_bh(&adapter->mcc_lock); 2677 return status; 2678 } 2679 2680 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2681 struct be_dma_mem *nonemb_cmd) 2682 { 2683 struct be_mcc_wrb *wrb; 2684 struct be_cmd_req_seeprom_read *req; 2685 int status; 2686 2687 spin_lock_bh(&adapter->mcc_lock); 2688 2689 wrb = wrb_from_mccq(adapter); 2690 if (!wrb) { 2691 status = -EBUSY; 2692 goto err; 2693 } 2694 req = nonemb_cmd->va; 2695 2696 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2697 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2698 nonemb_cmd); 2699 2700 status = be_mcc_notify_wait(adapter); 2701 2702 err: 2703 spin_unlock_bh(&adapter->mcc_lock); 2704 return status; 2705 } 2706 2707 int be_cmd_get_phy_info(struct be_adapter *adapter) 2708 { 2709 struct be_mcc_wrb *wrb; 2710 struct be_cmd_req_get_phy_info *req; 2711 struct be_dma_mem cmd; 2712 int status; 2713 2714 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 2715 CMD_SUBSYSTEM_COMMON)) 2716 return -EPERM; 2717 2718 spin_lock_bh(&adapter->mcc_lock); 2719 2720 wrb = wrb_from_mccq(adapter); 2721 if (!wrb) { 2722 status = -EBUSY; 2723 goto err; 2724 } 2725 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2726 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2727 if (!cmd.va) { 2728 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2729 status = -ENOMEM; 2730 goto err; 2731 } 2732 2733 req = cmd.va; 2734 2735 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2736 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2737 wrb, &cmd); 2738 2739 status = be_mcc_notify_wait(adapter); 2740 if (!status) { 2741 struct be_phy_info *resp_phy_info = 2742 cmd.va + sizeof(struct be_cmd_req_hdr); 2743 2744 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2745 adapter->phy.interface_type = 2746 le16_to_cpu(resp_phy_info->interface_type); 2747 adapter->phy.auto_speeds_supported = 2748 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2749 adapter->phy.fixed_speeds_supported = 2750 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2751 adapter->phy.misc_params = 2752 le32_to_cpu(resp_phy_info->misc_params); 2753 2754 if (BE2_chip(adapter)) { 2755 adapter->phy.fixed_speeds_supported = 2756 BE_SUPPORTED_SPEED_10GBPS | 2757 BE_SUPPORTED_SPEED_1GBPS; 2758 } 2759 } 2760 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2761 err: 2762 spin_unlock_bh(&adapter->mcc_lock); 2763 return status; 2764 } 2765 2766 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2767 { 2768 struct be_mcc_wrb *wrb; 2769 struct be_cmd_req_set_qos *req; 2770 int status; 2771 2772 spin_lock_bh(&adapter->mcc_lock); 2773 2774 wrb = wrb_from_mccq(adapter); 2775 if (!wrb) { 2776 status = -EBUSY; 2777 goto err; 2778 } 2779 2780 req = embedded_payload(wrb); 2781 2782 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2783 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2784 2785 req->hdr.domain = domain; 2786 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2787 req->max_bps_nic = cpu_to_le32(bps); 2788 2789 status = be_mcc_notify_wait(adapter); 2790 2791 err: 2792 spin_unlock_bh(&adapter->mcc_lock); 2793 return status; 2794 } 2795 2796 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2797 { 2798 struct be_mcc_wrb *wrb; 2799 struct be_cmd_req_cntl_attribs *req; 2800 struct 
be_cmd_resp_cntl_attribs *resp; 2801 int status; 2802 int payload_len = max(sizeof(*req), sizeof(*resp)); 2803 struct mgmt_controller_attrib *attribs; 2804 struct be_dma_mem attribs_cmd; 2805 2806 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2807 return -1; 2808 2809 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2810 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2811 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2812 &attribs_cmd.dma); 2813 if (!attribs_cmd.va) { 2814 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2815 status = -ENOMEM; 2816 goto err; 2817 } 2818 2819 wrb = wrb_from_mbox(adapter); 2820 if (!wrb) { 2821 status = -EBUSY; 2822 goto err; 2823 } 2824 req = attribs_cmd.va; 2825 2826 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2827 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2828 wrb, &attribs_cmd); 2829 2830 status = be_mbox_notify_wait(adapter); 2831 if (!status) { 2832 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2833 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2834 } 2835 2836 err: 2837 mutex_unlock(&adapter->mbox_lock); 2838 if (attribs_cmd.va) 2839 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2840 attribs_cmd.va, attribs_cmd.dma); 2841 return status; 2842 } 2843 2844 /* Uses mbox */ 2845 int be_cmd_req_native_mode(struct be_adapter *adapter) 2846 { 2847 struct be_mcc_wrb *wrb; 2848 struct be_cmd_req_set_func_cap *req; 2849 int status; 2850 2851 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2852 return -1; 2853 2854 wrb = wrb_from_mbox(adapter); 2855 if (!wrb) { 2856 status = -EBUSY; 2857 goto err; 2858 } 2859 2860 req = embedded_payload(wrb); 2861 2862 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2863 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2864 sizeof(*req), wrb, NULL); 2865 2866 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2867 CAPABILITY_BE3_NATIVE_ERX_API); 2868 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2869 2870 status = be_mbox_notify_wait(adapter); 2871 if (!status) { 2872 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2873 2874 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2875 CAPABILITY_BE3_NATIVE_ERX_API; 2876 if (!adapter->be3_native) 2877 dev_warn(&adapter->pdev->dev, 2878 "adapter not in advanced mode\n"); 2879 } 2880 err: 2881 mutex_unlock(&adapter->mbox_lock); 2882 return status; 2883 } 2884 2885 /* Get privilege(s) for a function */ 2886 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2887 u32 domain) 2888 { 2889 struct be_mcc_wrb *wrb; 2890 struct be_cmd_req_get_fn_privileges *req; 2891 int status; 2892 2893 spin_lock_bh(&adapter->mcc_lock); 2894 2895 wrb = wrb_from_mccq(adapter); 2896 if (!wrb) { 2897 status = -EBUSY; 2898 goto err; 2899 } 2900 2901 req = embedded_payload(wrb); 2902 2903 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2904 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 2905 wrb, NULL); 2906 2907 req->hdr.domain = domain; 2908 2909 status = be_mcc_notify_wait(adapter); 2910 if (!status) { 2911 struct be_cmd_resp_get_fn_privileges *resp = 2912 embedded_payload(wrb); 2913 2914 *privilege = le32_to_cpu(resp->privilege_mask); 2915 2916 /* In UMC mode FW does not return right privileges. 2917 * Override with correct privilege equivalent to PF. 
2918 */ 2919 if (BEx_chip(adapter) && be_is_mc(adapter) && 2920 be_physfn(adapter)) 2921 *privilege = MAX_PRIVILEGES; 2922 } 2923 2924 err: 2925 spin_unlock_bh(&adapter->mcc_lock); 2926 return status; 2927 } 2928 2929 /* Set privilege(s) for a function */ 2930 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2931 u32 domain) 2932 { 2933 struct be_mcc_wrb *wrb; 2934 struct be_cmd_req_set_fn_privileges *req; 2935 int status; 2936 2937 spin_lock_bh(&adapter->mcc_lock); 2938 2939 wrb = wrb_from_mccq(adapter); 2940 if (!wrb) { 2941 status = -EBUSY; 2942 goto err; 2943 } 2944 2945 req = embedded_payload(wrb); 2946 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2947 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 2948 wrb, NULL); 2949 req->hdr.domain = domain; 2950 if (lancer_chip(adapter)) 2951 req->privileges_lancer = cpu_to_le32(privileges); 2952 else 2953 req->privileges = cpu_to_le32(privileges); 2954 2955 status = be_mcc_notify_wait(adapter); 2956 err: 2957 spin_unlock_bh(&adapter->mcc_lock); 2958 return status; 2959 } 2960 2961 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 2962 * pmac_id_valid: false => pmac_id or MAC address is requested. 2963 * If pmac_id is returned, pmac_id_valid is returned as true 2964 */ 2965 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2966 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 2967 u8 domain) 2968 { 2969 struct be_mcc_wrb *wrb; 2970 struct be_cmd_req_get_mac_list *req; 2971 int status; 2972 int mac_count; 2973 struct be_dma_mem get_mac_list_cmd; 2974 int i; 2975 2976 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2977 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2978 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2979 get_mac_list_cmd.size, 2980 &get_mac_list_cmd.dma); 2981 2982 if (!get_mac_list_cmd.va) { 2983 dev_err(&adapter->pdev->dev, 2984 "Memory allocation failure during GET_MAC_LIST\n"); 2985 return -ENOMEM; 2986 } 2987 2988 spin_lock_bh(&adapter->mcc_lock); 2989 2990 wrb = wrb_from_mccq(adapter); 2991 if (!wrb) { 2992 status = -EBUSY; 2993 goto out; 2994 } 2995 2996 req = get_mac_list_cmd.va; 2997 2998 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2999 OPCODE_COMMON_GET_MAC_LIST, 3000 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 3001 req->hdr.domain = domain; 3002 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 3003 if (*pmac_id_valid) { 3004 req->mac_id = cpu_to_le32(*pmac_id); 3005 req->iface_id = cpu_to_le16(if_handle); 3006 req->perm_override = 0; 3007 } else { 3008 req->perm_override = 1; 3009 } 3010 3011 status = be_mcc_notify_wait(adapter); 3012 if (!status) { 3013 struct be_cmd_resp_get_mac_list *resp = 3014 get_mac_list_cmd.va; 3015 3016 if (*pmac_id_valid) { 3017 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 3018 ETH_ALEN); 3019 goto out; 3020 } 3021 3022 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 3023 /* Mac list returned could contain one or more active mac_ids 3024 * or one or more true or pseudo permanent mac addresses. 3025 * If an active mac_id is present, return first active mac_id 3026 * found. 
3027 */ 3028 for (i = 0; i < mac_count; i++) { 3029 struct get_list_macaddr *mac_entry; 3030 u16 mac_addr_size; 3031 u32 mac_id; 3032 3033 mac_entry = &resp->macaddr_list[i]; 3034 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 3035 /* mac_id is a 32 bit value and mac_addr size 3036 * is 6 bytes 3037 */ 3038 if (mac_addr_size == sizeof(u32)) { 3039 *pmac_id_valid = true; 3040 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 3041 *pmac_id = le32_to_cpu(mac_id); 3042 goto out; 3043 } 3044 } 3045 /* If no active mac_id found, return first mac addr */ 3046 *pmac_id_valid = false; 3047 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 3048 ETH_ALEN); 3049 } 3050 3051 out: 3052 spin_unlock_bh(&adapter->mcc_lock); 3053 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 3054 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3055 return status; 3056 } 3057 3058 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 3059 u8 *mac, u32 if_handle, bool active, u32 domain) 3060 { 3061 if (!active) 3062 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 3063 if_handle, domain); 3064 if (BEx_chip(adapter)) 3065 return be_cmd_mac_addr_query(adapter, mac, false, 3066 if_handle, curr_pmac_id); 3067 else 3068 /* Fetch the MAC address using pmac_id */ 3069 return be_cmd_get_mac_from_list(adapter, mac, &active, 3070 &curr_pmac_id, 3071 if_handle, domain); 3072 } 3073 3074 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 3075 { 3076 int status; 3077 bool pmac_valid = false; 3078 3079 eth_zero_addr(mac); 3080 3081 if (BEx_chip(adapter)) { 3082 if (be_physfn(adapter)) 3083 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 3084 0); 3085 else 3086 status = be_cmd_mac_addr_query(adapter, mac, false, 3087 adapter->if_handle, 0); 3088 } else { 3089 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 3090 NULL, adapter->if_handle, 0); 3091 } 3092 3093 return status; 3094 } 3095 3096 /* Uses synchronous MCCQ */ 3097 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 3098 u8 mac_count, u32 domain) 3099 { 3100 struct be_mcc_wrb *wrb; 3101 struct be_cmd_req_set_mac_list *req; 3102 int status; 3103 struct be_dma_mem cmd; 3104 3105 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3106 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3107 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 3108 &cmd.dma, GFP_KERNEL); 3109 if (!cmd.va) 3110 return -ENOMEM; 3111 3112 spin_lock_bh(&adapter->mcc_lock); 3113 3114 wrb = wrb_from_mccq(adapter); 3115 if (!wrb) { 3116 status = -EBUSY; 3117 goto err; 3118 } 3119 3120 req = cmd.va; 3121 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3122 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 3123 wrb, &cmd); 3124 3125 req->hdr.domain = domain; 3126 req->mac_count = mac_count; 3127 if (mac_count) 3128 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 3129 3130 status = be_mcc_notify_wait(adapter); 3131 3132 err: 3133 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3134 spin_unlock_bh(&adapter->mcc_lock); 3135 return status; 3136 } 3137 3138 /* Wrapper to delete any active MACs and provision the new mac. 3139 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 3140 * current list are active. 
3141 */ 3142 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 3143 { 3144 bool active_mac = false; 3145 u8 old_mac[ETH_ALEN]; 3146 u32 pmac_id; 3147 int status; 3148 3149 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 3150 &pmac_id, if_id, dom); 3151 3152 if (!status && active_mac) 3153 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 3154 3155 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 3156 } 3157 3158 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 3159 u32 domain, u16 intf_id, u16 hsw_mode) 3160 { 3161 struct be_mcc_wrb *wrb; 3162 struct be_cmd_req_set_hsw_config *req; 3163 void *ctxt; 3164 int status; 3165 3166 spin_lock_bh(&adapter->mcc_lock); 3167 3168 wrb = wrb_from_mccq(adapter); 3169 if (!wrb) { 3170 status = -EBUSY; 3171 goto err; 3172 } 3173 3174 req = embedded_payload(wrb); 3175 ctxt = &req->context; 3176 3177 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3178 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3179 NULL); 3180 3181 req->hdr.domain = domain; 3182 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3183 if (pvid) { 3184 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3185 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3186 } 3187 if (!BEx_chip(adapter) && hsw_mode) { 3188 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3189 ctxt, adapter->hba_port_num); 3190 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3191 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3192 ctxt, hsw_mode); 3193 } 3194 3195 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3196 status = be_mcc_notify_wait(adapter); 3197 3198 err: 3199 spin_unlock_bh(&adapter->mcc_lock); 3200 return status; 3201 } 3202 3203 /* Get Hyper switch config */ 3204 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3205 u32 domain, u16 intf_id, u8 *mode) 3206 { 3207 struct be_mcc_wrb *wrb; 3208 struct be_cmd_req_get_hsw_config *req; 3209 void *ctxt; 3210 int status; 3211 u16 vid; 3212 3213 spin_lock_bh(&adapter->mcc_lock); 3214 3215 wrb = wrb_from_mccq(adapter); 3216 if (!wrb) { 3217 status = -EBUSY; 3218 goto err; 3219 } 3220 3221 req = embedded_payload(wrb); 3222 ctxt = &req->context; 3223 3224 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3225 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3226 NULL); 3227 3228 req->hdr.domain = domain; 3229 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3230 ctxt, intf_id); 3231 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3232 3233 if (!BEx_chip(adapter) && mode) { 3234 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3235 ctxt, adapter->hba_port_num); 3236 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3237 } 3238 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3239 3240 status = be_mcc_notify_wait(adapter); 3241 if (!status) { 3242 struct be_cmd_resp_get_hsw_config *resp = 3243 embedded_payload(wrb); 3244 3245 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3246 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3247 pvid, &resp->context); 3248 if (pvid) 3249 *pvid = le16_to_cpu(vid); 3250 if (mode) 3251 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3252 port_fwd_type, &resp->context); 3253 } 3254 3255 err: 3256 spin_unlock_bh(&adapter->mcc_lock); 3257 return status; 3258 } 3259 3260 static bool be_is_wol_excluded(struct be_adapter *adapter) 3261 { 3262 struct pci_dev *pdev = 
adapter->pdev; 3263 3264 if (!be_physfn(adapter)) 3265 return true; 3266 3267 switch (pdev->subsystem_device) { 3268 case OC_SUBSYS_DEVICE_ID1: 3269 case OC_SUBSYS_DEVICE_ID2: 3270 case OC_SUBSYS_DEVICE_ID3: 3271 case OC_SUBSYS_DEVICE_ID4: 3272 return true; 3273 default: 3274 return false; 3275 } 3276 } 3277 3278 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3279 { 3280 struct be_mcc_wrb *wrb; 3281 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 3282 int status = 0; 3283 struct be_dma_mem cmd; 3284 3285 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3286 CMD_SUBSYSTEM_ETH)) 3287 return -EPERM; 3288 3289 if (be_is_wol_excluded(adapter)) 3290 return status; 3291 3292 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3293 return -1; 3294 3295 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3296 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3297 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3298 if (!cmd.va) { 3299 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3300 status = -ENOMEM; 3301 goto err; 3302 } 3303 3304 wrb = wrb_from_mbox(adapter); 3305 if (!wrb) { 3306 status = -EBUSY; 3307 goto err; 3308 } 3309 3310 req = cmd.va; 3311 3312 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3313 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3314 sizeof(*req), wrb, &cmd); 3315 3316 req->hdr.version = 1; 3317 req->query_options = BE_GET_WOL_CAP; 3318 3319 status = be_mbox_notify_wait(adapter); 3320 if (!status) { 3321 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 3322 3323 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; 3324 3325 adapter->wol_cap = resp->wol_settings; 3326 if (adapter->wol_cap & BE_WOL_CAP) 3327 adapter->wol_en = true; 3328 } 3329 err: 3330 mutex_unlock(&adapter->mbox_lock); 3331 if (cmd.va) 3332 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3333 return status; 3334 3335 } 3336 3337 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 3338 { 3339 struct be_dma_mem extfat_cmd; 3340 struct be_fat_conf_params *cfgs; 3341 int status; 3342 int i, j; 3343 3344 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3345 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3346 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3347 &extfat_cmd.dma); 3348 if (!extfat_cmd.va) 3349 return -ENOMEM; 3350 3351 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3352 if (status) 3353 goto err; 3354 3355 cfgs = (struct be_fat_conf_params *) 3356 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 3357 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 3358 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 3359 3360 for (j = 0; j < num_modes; j++) { 3361 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 3362 cfgs->module[i].trace_lvl[j].dbg_lvl = 3363 cpu_to_le32(level); 3364 } 3365 } 3366 3367 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3368 err: 3369 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3370 extfat_cmd.dma); 3371 return status; 3372 } 3373 3374 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 3375 { 3376 struct be_dma_mem extfat_cmd; 3377 struct be_fat_conf_params *cfgs; 3378 int status, j; 3379 int level = 0; 3380 3381 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3382 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3383 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3384 &extfat_cmd.dma); 3385 3386 if (!extfat_cmd.va) { 3387 
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3388 __func__); 3389 goto err; 3390 } 3391 3392 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3393 if (!status) { 3394 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 3395 sizeof(struct be_cmd_resp_hdr)); 3396 3397 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 3398 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 3399 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3400 } 3401 } 3402 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3403 extfat_cmd.dma); 3404 err: 3405 return level; 3406 } 3407 3408 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 3409 struct be_dma_mem *cmd) 3410 { 3411 struct be_mcc_wrb *wrb; 3412 struct be_cmd_req_get_ext_fat_caps *req; 3413 int status; 3414 3415 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3416 return -1; 3417 3418 wrb = wrb_from_mbox(adapter); 3419 if (!wrb) { 3420 status = -EBUSY; 3421 goto err; 3422 } 3423 3424 req = cmd->va; 3425 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3426 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 3427 cmd->size, wrb, cmd); 3428 req->parameter_type = cpu_to_le32(1); 3429 3430 status = be_mbox_notify_wait(adapter); 3431 err: 3432 mutex_unlock(&adapter->mbox_lock); 3433 return status; 3434 } 3435 3436 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 3437 struct be_dma_mem *cmd, 3438 struct be_fat_conf_params *configs) 3439 { 3440 struct be_mcc_wrb *wrb; 3441 struct be_cmd_req_set_ext_fat_caps *req; 3442 int status; 3443 3444 spin_lock_bh(&adapter->mcc_lock); 3445 3446 wrb = wrb_from_mccq(adapter); 3447 if (!wrb) { 3448 status = -EBUSY; 3449 goto err; 3450 } 3451 3452 req = cmd->va; 3453 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 3454 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3455 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 3456 cmd->size, wrb, cmd); 3457 3458 status = be_mcc_notify_wait(adapter); 3459 err: 3460 spin_unlock_bh(&adapter->mcc_lock); 3461 return status; 3462 } 3463 3464 int be_cmd_query_port_name(struct be_adapter *adapter) 3465 { 3466 struct be_cmd_req_get_port_name *req; 3467 struct be_mcc_wrb *wrb; 3468 int status; 3469 3470 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3471 return -1; 3472 3473 wrb = wrb_from_mbox(adapter); 3474 req = embedded_payload(wrb); 3475 3476 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3477 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3478 NULL); 3479 if (!BEx_chip(adapter)) 3480 req->hdr.version = 1; 3481 3482 status = be_mbox_notify_wait(adapter); 3483 if (!status) { 3484 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3485 3486 adapter->port_name = resp->port_name[adapter->hba_port_num]; 3487 } else { 3488 adapter->port_name = adapter->hba_port_num + '0'; 3489 } 3490 3491 mutex_unlock(&adapter->mbox_lock); 3492 return status; 3493 } 3494 3495 /* Descriptor type */ 3496 enum { 3497 FUNC_DESC = 1, 3498 VFT_DESC = 2 3499 }; 3500 3501 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 3502 int desc_type) 3503 { 3504 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3505 struct be_nic_res_desc *nic; 3506 int i; 3507 3508 for (i = 0; i < desc_count; i++) { 3509 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3510 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { 3511 nic = (struct be_nic_res_desc *)hdr; 3512 if (desc_type == FUNC_DESC || 3513 (desc_type == VFT_DESC && 3514 nic->flags & (1 << VFT_SHIFT))) 3515 return nic; 3516 } 
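/* A desc_len of zero denotes a legacy V0-sized descriptor; normalize it before stepping to the next descriptor in the buffer */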
3517 3518 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3519 hdr = (void *)hdr + hdr->desc_len; 3520 } 3521 return NULL; 3522 } 3523 3524 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) 3525 { 3526 return be_get_nic_desc(buf, desc_count, VFT_DESC); 3527 } 3528 3529 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) 3530 { 3531 return be_get_nic_desc(buf, desc_count, FUNC_DESC); 3532 } 3533 3534 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, 3535 u32 desc_count) 3536 { 3537 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3538 struct be_pcie_res_desc *pcie; 3539 int i; 3540 3541 for (i = 0; i < desc_count; i++) { 3542 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 3543 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { 3544 pcie = (struct be_pcie_res_desc *)hdr; 3545 if (pcie->pf_num == devfn) 3546 return pcie; 3547 } 3548 3549 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3550 hdr = (void *)hdr + hdr->desc_len; 3551 } 3552 return NULL; 3553 } 3554 3555 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 3556 { 3557 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3558 int i; 3559 3560 for (i = 0; i < desc_count; i++) { 3561 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 3562 return (struct be_port_res_desc *)hdr; 3563 3564 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3565 hdr = (void *)hdr + hdr->desc_len; 3566 } 3567 return NULL; 3568 } 3569 3570 static void be_copy_nic_desc(struct be_resources *res, 3571 struct be_nic_res_desc *desc) 3572 { 3573 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 3574 res->max_vlans = le16_to_cpu(desc->vlan_count); 3575 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 3576 res->max_tx_qs = le16_to_cpu(desc->txq_count); 3577 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 3578 res->max_rx_qs = le16_to_cpu(desc->rq_count); 3579 res->max_evt_qs = le16_to_cpu(desc->eq_count); 3580 res->max_cq_count = le16_to_cpu(desc->cq_count); 3581 res->max_iface_count = le16_to_cpu(desc->iface_count); 3582 res->max_mcc_count = le16_to_cpu(desc->mcc_count); 3583 /* Clear flags that driver is not interested in */ 3584 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 3585 BE_IF_CAP_FLAGS_WANT; 3586 } 3587 3588 /* Uses Mbox */ 3589 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 3590 { 3591 struct be_mcc_wrb *wrb; 3592 struct be_cmd_req_get_func_config *req; 3593 int status; 3594 struct be_dma_mem cmd; 3595 3596 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3597 return -1; 3598 3599 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3600 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3601 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3602 if (!cmd.va) { 3603 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3604 status = -ENOMEM; 3605 goto err; 3606 } 3607 3608 wrb = wrb_from_mbox(adapter); 3609 if (!wrb) { 3610 status = -EBUSY; 3611 goto err; 3612 } 3613 3614 req = cmd.va; 3615 3616 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3617 OPCODE_COMMON_GET_FUNC_CONFIG, 3618 cmd.size, wrb, &cmd); 3619 3620 if (skyhawk_chip(adapter)) 3621 req->hdr.version = 1; 3622 3623 status = be_mbox_notify_wait(adapter); 3624 if (!status) { 3625 struct be_cmd_resp_get_func_config *resp = cmd.va; 3626 u32 desc_count = le32_to_cpu(resp->desc_count); 3627 struct be_nic_res_desc *desc; 3628 3629 desc = be_get_func_nic_desc(resp->func_param, desc_count); 
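/* A response without a NIC resource descriptor for this function is treated as malformed */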
3630 if (!desc) { 3631 status = -EINVAL; 3632 goto err; 3633 } 3634 3635 adapter->pf_number = desc->pf_num; 3636 be_copy_nic_desc(res, desc); 3637 } 3638 err: 3639 mutex_unlock(&adapter->mbox_lock); 3640 if (cmd.va) 3641 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3642 return status; 3643 } 3644 3645 /* Will use MBOX only if MCCQ has not been created */ 3646 int be_cmd_get_profile_config(struct be_adapter *adapter, 3647 struct be_resources *res, u8 query, u8 domain) 3648 { 3649 struct be_cmd_resp_get_profile_config *resp; 3650 struct be_cmd_req_get_profile_config *req; 3651 struct be_nic_res_desc *vf_res; 3652 struct be_pcie_res_desc *pcie; 3653 struct be_port_res_desc *port; 3654 struct be_nic_res_desc *nic; 3655 struct be_mcc_wrb wrb = {0}; 3656 struct be_dma_mem cmd; 3657 u16 desc_count; 3658 int status; 3659 3660 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3661 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3662 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3663 if (!cmd.va) 3664 return -ENOMEM; 3665 3666 req = cmd.va; 3667 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3668 OPCODE_COMMON_GET_PROFILE_CONFIG, 3669 cmd.size, &wrb, &cmd); 3670 3671 req->hdr.domain = domain; 3672 if (!lancer_chip(adapter)) 3673 req->hdr.version = 1; 3674 req->type = ACTIVE_PROFILE_TYPE; 3675 3676 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 3677 * descriptors with all bits set to "1" for the fields which can be 3678 * modified using SET_PROFILE_CONFIG cmd. 3679 */ 3680 if (query == RESOURCE_MODIFIABLE) 3681 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; 3682 3683 status = be_cmd_notify_wait(adapter, &wrb); 3684 if (status) 3685 goto err; 3686 3687 resp = cmd.va; 3688 desc_count = le16_to_cpu(resp->desc_count); 3689 3690 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3691 desc_count); 3692 if (pcie) 3693 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3694 3695 port = be_get_port_desc(resp->func_param, desc_count); 3696 if (port) 3697 adapter->mc_type = port->mc_type; 3698 3699 nic = be_get_func_nic_desc(resp->func_param, desc_count); 3700 if (nic) 3701 be_copy_nic_desc(res, nic); 3702 3703 vf_res = be_get_vft_desc(resp->func_param, desc_count); 3704 if (vf_res) 3705 res->vf_if_cap_flags = vf_res->cap_flags; 3706 err: 3707 if (cmd.va) 3708 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3709 return status; 3710 } 3711 3712 /* Will use MBOX only if MCCQ has not been created */ 3713 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 3714 int size, int count, u8 version, u8 domain) 3715 { 3716 struct be_cmd_req_set_profile_config *req; 3717 struct be_mcc_wrb wrb = {0}; 3718 struct be_dma_mem cmd; 3719 int status; 3720 3721 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3722 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3723 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3724 if (!cmd.va) 3725 return -ENOMEM; 3726 3727 req = cmd.va; 3728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3729 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, 3730 &wrb, &cmd); 3731 req->hdr.version = version; 3732 req->hdr.domain = domain; 3733 req->desc_count = cpu_to_le32(count); 3734 memcpy(req->desc, desc, size); 3735 3736 status = be_cmd_notify_wait(adapter, &wrb); 3737 3738 if (cmd.va) 3739 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3740 return status; 3741 } 3742 3743 /* Mark all fields invalid */ 3744 static void be_reset_nic_desc(struct 
be_nic_res_desc *nic) 3745 { 3746 memset(nic, 0, sizeof(*nic)); 3747 nic->unicast_mac_count = 0xFFFF; 3748 nic->mcc_count = 0xFFFF; 3749 nic->vlan_count = 0xFFFF; 3750 nic->mcast_mac_count = 0xFFFF; 3751 nic->txq_count = 0xFFFF; 3752 nic->rq_count = 0xFFFF; 3753 nic->rssq_count = 0xFFFF; 3754 nic->lro_count = 0xFFFF; 3755 nic->cq_count = 0xFFFF; 3756 nic->toe_conn_count = 0xFFFF; 3757 nic->eq_count = 0xFFFF; 3758 nic->iface_count = 0xFFFF; 3759 nic->link_param = 0xFF; 3760 nic->channel_id_param = cpu_to_le16(0xF000); 3761 nic->acpi_params = 0xFF; 3762 nic->wol_param = 0x0F; 3763 nic->tunnel_iface_count = 0xFFFF; 3764 nic->direct_tenant_iface_count = 0xFFFF; 3765 nic->bw_min = 0xFFFFFFFF; 3766 nic->bw_max = 0xFFFFFFFF; 3767 } 3768 3769 /* Mark all fields invalid */ 3770 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) 3771 { 3772 memset(pcie, 0, sizeof(*pcie)); 3773 pcie->sriov_state = 0xFF; 3774 pcie->pf_state = 0xFF; 3775 pcie->pf_type = 0xFF; 3776 pcie->num_vfs = 0xFFFF; 3777 } 3778 3779 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 3780 u8 domain) 3781 { 3782 struct be_nic_res_desc nic_desc; 3783 u32 bw_percent; 3784 u16 version = 0; 3785 3786 if (BE3_chip(adapter)) 3787 return be_cmd_set_qos(adapter, max_rate / 10, domain); 3788 3789 be_reset_nic_desc(&nic_desc); 3790 nic_desc.pf_num = adapter->pf_number; 3791 nic_desc.vf_num = domain; 3792 nic_desc.bw_min = 0; 3793 if (lancer_chip(adapter)) { 3794 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3795 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3796 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3797 (1 << NOSV_SHIFT); 3798 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 3799 } else { 3800 version = 1; 3801 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3802 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3803 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3804 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 3805 nic_desc.bw_max = cpu_to_le32(bw_percent); 3806 } 3807 3808 return be_cmd_set_profile_config(adapter, &nic_desc, 3809 nic_desc.hdr.desc_len, 3810 1, version, domain); 3811 } 3812 3813 static void be_fill_vf_res_template(struct be_adapter *adapter, 3814 struct be_resources pool_res, 3815 u16 num_vfs, u16 num_vf_qs, 3816 struct be_nic_res_desc *nic_vft) 3817 { 3818 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; 3819 struct be_resources res_mod = {0}; 3820 3821 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, 3822 * which are modifiable using SET_PROFILE_CONFIG cmd. 3823 */ 3824 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); 3825 3826 /* If RSS IFACE capability flags are modifiable for a VF, set the 3827 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if 3828 * more than 1 RSSQ is available for a VF. 3829 * Otherwise, provision only 1 queue pair for VF. 
3830 */ 3831 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { 3832 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); 3833 if (num_vf_qs > 1) { 3834 vf_if_cap_flags |= BE_IF_FLAGS_RSS; 3835 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) 3836 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; 3837 } else { 3838 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | 3839 BE_IF_FLAGS_DEFQ_RSS); 3840 } 3841 3842 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); 3843 } else { 3844 num_vf_qs = 1; 3845 } 3846 3847 nic_vft->rq_count = cpu_to_le16(num_vf_qs); 3848 nic_vft->txq_count = cpu_to_le16(num_vf_qs); 3849 nic_vft->rssq_count = cpu_to_le16(num_vf_qs); 3850 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / 3851 (num_vfs + 1)); 3852 3853 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally 3854 * among the PF and its VFs, if the fields are changeable 3855 */ 3856 if (res_mod.max_uc_mac == FIELD_MODIFIABLE) 3857 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / 3858 (num_vfs + 1)); 3859 3860 if (res_mod.max_vlans == FIELD_MODIFIABLE) 3861 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / 3862 (num_vfs + 1)); 3863 3864 if (res_mod.max_iface_count == FIELD_MODIFIABLE) 3865 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / 3866 (num_vfs + 1)); 3867 3868 if (res_mod.max_mcc_count == FIELD_MODIFIABLE) 3869 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / 3870 (num_vfs + 1)); 3871 } 3872 3873 int be_cmd_set_sriov_config(struct be_adapter *adapter, 3874 struct be_resources pool_res, u16 num_vfs, 3875 u16 num_vf_qs) 3876 { 3877 struct { 3878 struct be_pcie_res_desc pcie; 3879 struct be_nic_res_desc nic_vft; 3880 } __packed desc; 3881 3882 /* PF PCIE descriptor */ 3883 be_reset_pcie_desc(&desc.pcie); 3884 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; 3885 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3886 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3887 desc.pcie.pf_num = adapter->pdev->devfn; 3888 desc.pcie.sriov_state = num_vfs ?
1 : 0; 3889 desc.pcie.num_vfs = cpu_to_le16(num_vfs); 3890 3891 /* VF NIC Template descriptor */ 3892 be_reset_nic_desc(&desc.nic_vft); 3893 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3894 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3895 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3896 desc.nic_vft.pf_num = adapter->pdev->devfn; 3897 desc.nic_vft.vf_num = 0; 3898 3899 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, 3900 &desc.nic_vft); 3901 3902 return be_cmd_set_profile_config(adapter, &desc, 3903 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 3904 } 3905 3906 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3907 { 3908 struct be_mcc_wrb *wrb; 3909 struct be_cmd_req_manage_iface_filters *req; 3910 int status; 3911 3912 if (iface == 0xFFFFFFFF) 3913 return -1; 3914 3915 spin_lock_bh(&adapter->mcc_lock); 3916 3917 wrb = wrb_from_mccq(adapter); 3918 if (!wrb) { 3919 status = -EBUSY; 3920 goto err; 3921 } 3922 req = embedded_payload(wrb); 3923 3924 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3925 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 3926 wrb, NULL); 3927 req->op = op; 3928 req->target_iface_id = cpu_to_le32(iface); 3929 3930 status = be_mcc_notify_wait(adapter); 3931 err: 3932 spin_unlock_bh(&adapter->mcc_lock); 3933 return status; 3934 } 3935 3936 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 3937 { 3938 struct be_port_res_desc port_desc; 3939 3940 memset(&port_desc, 0, sizeof(port_desc)); 3941 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 3942 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3943 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3944 port_desc.link_num = adapter->hba_port_num; 3945 if (port) { 3946 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 3947 (1 << RCVID_SHIFT); 3948 port_desc.nv_port = swab16(port); 3949 } else { 3950 port_desc.nv_flags = NV_TYPE_DISABLED; 3951 port_desc.nv_port = 0; 3952 } 3953 3954 return be_cmd_set_profile_config(adapter, &port_desc, 3955 RESOURCE_DESC_SIZE_V1, 1, 1, 0); 3956 } 3957 3958 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 3959 int vf_num) 3960 { 3961 struct be_mcc_wrb *wrb; 3962 struct be_cmd_req_get_iface_list *req; 3963 struct be_cmd_resp_get_iface_list *resp; 3964 int status; 3965 3966 spin_lock_bh(&adapter->mcc_lock); 3967 3968 wrb = wrb_from_mccq(adapter); 3969 if (!wrb) { 3970 status = -EBUSY; 3971 goto err; 3972 } 3973 req = embedded_payload(wrb); 3974 3975 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3976 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 3977 wrb, NULL); 3978 req->hdr.domain = vf_num + 1; 3979 3980 status = be_mcc_notify_wait(adapter); 3981 if (!status) { 3982 resp = (struct be_cmd_resp_get_iface_list *)req; 3983 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 3984 } 3985 3986 err: 3987 spin_unlock_bh(&adapter->mcc_lock); 3988 return status; 3989 } 3990 3991 static int lancer_wait_idle(struct be_adapter *adapter) 3992 { 3993 #define SLIPORT_IDLE_TIMEOUT 30 3994 u32 reg_val; 3995 int status = 0, i; 3996 3997 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 3998 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 3999 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 4000 break; 4001 4002 ssleep(1); 4003 } 4004 4005 if (i == SLIPORT_IDLE_TIMEOUT) 4006 status = -1; 4007 4008 return status; 4009 } 4010 4011 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 4012 { 4013 int status = 0; 4014 4015 status = 
lancer_wait_idle(adapter); 4016 if (status) 4017 return status; 4018 4019 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 4020 4021 return status; 4022 } 4023 4024 /* Check whether a diagnostic dump image is present */ 4025 bool dump_present(struct be_adapter *adapter) 4026 { 4027 u32 sliport_status = 0; 4028 4029 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 4030 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 4031 } 4032 4033 int lancer_initiate_dump(struct be_adapter *adapter) 4034 { 4035 struct device *dev = &adapter->pdev->dev; 4036 int status; 4037 4038 if (dump_present(adapter)) { 4039 dev_info(dev, "Previous dump not cleared, not forcing dump\n"); 4040 return -EEXIST; 4041 } 4042 4043 /* Trigger a firmware reset and diagnostic dump */ 4044 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 4045 PHYSDEV_CONTROL_DD_MASK); 4046 if (status < 0) { 4047 dev_err(dev, "FW reset failed\n"); 4048 return status; 4049 } 4050 4051 status = lancer_wait_idle(adapter); 4052 if (status) 4053 return status; 4054 4055 if (!dump_present(adapter)) { 4056 dev_err(dev, "FW dump not generated\n"); 4057 return -EIO; 4058 } 4059 4060 return 0; 4061 } 4062 4063 int lancer_delete_dump(struct be_adapter *adapter) 4064 { 4065 int status; 4066 4067 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); 4068 return be_cmd_status(status); 4069 } 4070 4071 /* Uses sync mcc */ 4072 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 4073 { 4074 struct be_mcc_wrb *wrb; 4075 struct be_cmd_enable_disable_vf *req; 4076 int status; 4077 4078 if (BEx_chip(adapter)) 4079 return 0; 4080 4081 spin_lock_bh(&adapter->mcc_lock); 4082 4083 wrb = wrb_from_mccq(adapter); 4084 if (!wrb) { 4085 status = -EBUSY; 4086 goto err; 4087 } 4088 4089 req = embedded_payload(wrb); 4090 4091 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4092 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 4093 wrb, NULL); 4094 4095 req->hdr.domain = domain; 4096 req->enable = 1; 4097 status = be_mcc_notify_wait(adapter); 4098 err: 4099 spin_unlock_bh(&adapter->mcc_lock); 4100 return status; 4101 } 4102 4103 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 4104 { 4105 struct be_mcc_wrb *wrb; 4106 struct be_cmd_req_intr_set *req; 4107 int status; 4108 4109 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4110 return -1; 4111 4112 wrb = wrb_from_mbox(adapter); 4113 4114 req = embedded_payload(wrb); 4115 4116 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4117 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 4118 wrb, NULL); 4119 4120 req->intr_enabled = intr_enable; 4121 4122 status = be_mbox_notify_wait(adapter); 4123 4124 mutex_unlock(&adapter->mbox_lock); 4125 return status; 4126 } 4127 4128 /* Uses MBOX */ 4129 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 4130 { 4131 struct be_cmd_req_get_active_profile *req; 4132 struct be_mcc_wrb *wrb; 4133 int status; 4134 4135 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4136 return -1; 4137 4138 wrb = wrb_from_mbox(adapter); 4139 if (!wrb) { 4140 status = -EBUSY; 4141 goto err; 4142 } 4143 4144 req = embedded_payload(wrb); 4145 4146 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4147 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 4148 wrb, NULL); 4149 4150 status = be_mbox_notify_wait(adapter); 4151 if (!status) { 4152 struct be_cmd_resp_get_active_profile *resp = 4153 embedded_payload(wrb); 4154 4155 *profile_id = le16_to_cpu(resp->active_profile_id); 4156 } 4157 4158
err: 4159 mutex_unlock(&adapter->mbox_lock); 4160 return status; 4161 } 4162 4163 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 4164 int link_state, u8 domain) 4165 { 4166 struct be_mcc_wrb *wrb; 4167 struct be_cmd_req_set_ll_link *req; 4168 int status; 4169 4170 if (BEx_chip(adapter) || lancer_chip(adapter)) 4171 return -EOPNOTSUPP; 4172 4173 spin_lock_bh(&adapter->mcc_lock); 4174 4175 wrb = wrb_from_mccq(adapter); 4176 if (!wrb) { 4177 status = -EBUSY; 4178 goto err; 4179 } 4180 4181 req = embedded_payload(wrb); 4182 4183 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4184 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 4185 sizeof(*req), wrb, NULL); 4186 4187 req->hdr.version = 1; 4188 req->hdr.domain = domain; 4189 4190 if (link_state == IFLA_VF_LINK_STATE_ENABLE) 4191 req->link_config |= 1; 4192 4193 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4194 req->link_config |= 1 << PLINK_TRACK_SHIFT; 4195 4196 status = be_mcc_notify_wait(adapter); 4197 err: 4198 spin_unlock_bh(&adapter->mcc_lock); 4199 return status; 4200 } 4201 4202 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 4203 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 4204 { 4205 struct be_adapter *adapter = netdev_priv(netdev_handle); 4206 struct be_mcc_wrb *wrb; 4207 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; 4208 struct be_cmd_req_hdr *req; 4209 struct be_cmd_resp_hdr *resp; 4210 int status; 4211 4212 spin_lock_bh(&adapter->mcc_lock); 4213 4214 wrb = wrb_from_mccq(adapter); 4215 if (!wrb) { 4216 status = -EBUSY; 4217 goto err; 4218 } 4219 req = embedded_payload(wrb); 4220 resp = embedded_payload(wrb); 4221 4222 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 4223 hdr->opcode, wrb_payload_size, wrb, NULL); 4224 memcpy(req, wrb_payload, wrb_payload_size); 4225 be_dws_cpu_to_le(req, wrb_payload_size); 4226 4227 status = be_mcc_notify_wait(adapter); 4228 if (cmd_status) 4229 *cmd_status = (status & 0xffff); 4230 if (ext_status) 4231 *ext_status = 0; 4232 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 4233 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 4234 err: 4235 spin_unlock_bh(&adapter->mcc_lock); 4236 return status; 4237 } 4238 EXPORT_SYMBOL(be_roce_mcc_cmd); 4239