1 /* 2 * Copyright (C) 2005 - 2015 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Contact Information: 11 * linux-drivers@emulex.com 12 * 13 * Emulex 14 * 3333 Susan Street 15 * Costa Mesa, CA 92626 16 */ 17 18 #include <linux/module.h> 19 #include "be.h" 20 #include "be_cmds.h" 21 22 static char *be_port_misconfig_evt_desc[] = { 23 "A valid SFP module detected", 24 "Optics faulted/ incorrectly installed/ not installed.", 25 "Optics of two types installed.", 26 "Incompatible optics.", 27 "Unknown port SFP status" 28 }; 29 30 static char *be_port_misconfig_remedy_desc[] = { 31 "", 32 "Reseat optics. If issue not resolved, replace", 33 "Remove one optic or install matching pair of optics", 34 "Replace with compatible optics for card to function", 35 "" 36 }; 37 38 static struct be_cmd_priv_map cmd_priv_map[] = { 39 { 40 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 41 CMD_SUBSYSTEM_ETH, 42 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 43 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 44 }, 45 { 46 OPCODE_COMMON_GET_FLOW_CONTROL, 47 CMD_SUBSYSTEM_COMMON, 48 BE_PRIV_LNKQUERY | BE_PRIV_VHADM | 49 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 50 }, 51 { 52 OPCODE_COMMON_SET_FLOW_CONTROL, 53 CMD_SUBSYSTEM_COMMON, 54 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 55 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 56 }, 57 { 58 OPCODE_ETH_GET_PPORT_STATS, 59 CMD_SUBSYSTEM_ETH, 60 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 61 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 62 }, 63 { 64 OPCODE_COMMON_GET_PHY_DETAILS, 65 CMD_SUBSYSTEM_COMMON, 66 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 67 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 68 } 69 }; 70 71 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) 72 { 73 int i; 74 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); 75 u32 cmd_privileges = adapter->cmd_privileges; 76 77 for (i = 0; i < num_entries; i++) 78 if (opcode == cmd_priv_map[i].opcode && 79 subsystem == cmd_priv_map[i].subsystem) 80 if (!(cmd_privileges & cmd_priv_map[i].priv_mask)) 81 return false; 82 83 return true; 84 } 85 86 static inline void *embedded_payload(struct be_mcc_wrb *wrb) 87 { 88 return wrb->payload.embedded_payload; 89 } 90 91 static int be_mcc_notify(struct be_adapter *adapter) 92 { 93 struct be_queue_info *mccq = &adapter->mcc_obj.q; 94 u32 val = 0; 95 96 if (be_check_error(adapter, BE_ERROR_ANY)) 97 return -EIO; 98 99 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 100 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 101 102 wmb(); 103 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 104 105 return 0; 106 } 107 108 /* To check if valid bit is set, check the entire word as we don't know 109 * the endianness of the data (old entry is host endian while a new entry is 110 * little endian) */ 111 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 112 { 113 u32 flags; 114 115 if (compl->flags != 0) { 116 flags = le32_to_cpu(compl->flags); 117 if (flags & CQE_FLAGS_VALID_MASK) { 118 compl->flags = flags; 119 return true; 120 } 121 } 122 return false; 123 } 124 125 /* Need to reset the entire word that houses the valid bit */ 126 static inline void be_mcc_compl_use(struct be_mcc_compl *compl) 127 { 128 compl->flags = 0; 129 } 130 131 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) 132 { 133 unsigned long addr; 134 135 addr = tag1; 136 addr = ((addr << 16) << 16) | tag0; 137 return (void *)addr; 138 } 139 140 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status) 141 { 142 if (base_status == MCC_STATUS_NOT_SUPPORTED || 143 base_status == MCC_STATUS_ILLEGAL_REQUEST || 144 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES || 145 addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS || 146 (opcode == OPCODE_COMMON_WRITE_FLASHROM && 147 (base_status == MCC_STATUS_ILLEGAL_FIELD || 148 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH))) 149 return true; 150 else 151 return false; 152 } 153 154 /* Place holder for all the async MCC cmds wherein the caller is not in a busy 155 * loop (has not issued be_mcc_notify_wait()) 156 */ 157 static void be_async_cmd_process(struct be_adapter *adapter, 158 struct be_mcc_compl *compl, 159 struct be_cmd_resp_hdr *resp_hdr) 160 { 161 enum mcc_base_status base_status = base_status(compl->status); 162 u8 opcode = 0, subsystem = 0; 163 164 if (resp_hdr) { 165 opcode = resp_hdr->opcode; 166 subsystem = resp_hdr->subsystem; 167 } 168 169 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && 170 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 171 complete(&adapter->et_cmd_compl); 172 return; 173 } 174 175 if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE && 176 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 177 complete(&adapter->et_cmd_compl); 178 return; 179 } 180 181 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || 182 opcode == OPCODE_COMMON_WRITE_OBJECT) && 183 subsystem == CMD_SUBSYSTEM_COMMON) { 184 adapter->flash_status = compl->status; 185 complete(&adapter->et_cmd_compl); 186 return; 187 } 188 189 if ((opcode == OPCODE_ETH_GET_STATISTICS || 190 opcode == OPCODE_ETH_GET_PPORT_STATS) && 191 subsystem == CMD_SUBSYSTEM_ETH && 192 base_status == MCC_STATUS_SUCCESS) { 193 be_parse_stats(adapter); 194 adapter->stats_cmd_sent = false; 195 return; 196 } 197 198 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 199 subsystem == CMD_SUBSYSTEM_COMMON) { 200 if (base_status == MCC_STATUS_SUCCESS) { 201 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 202 (void *)resp_hdr; 203 adapter->hwmon_info.be_on_die_temp = 204 resp->on_die_temperature; 205 } else { 206 adapter->be_get_temp_freq = 0; 207 adapter->hwmon_info.be_on_die_temp = 208 BE_INVALID_DIE_TEMP; 209 } 210 return; 211 } 212 } 213 214 static int be_mcc_compl_process(struct be_adapter *adapter, 215 struct be_mcc_compl *compl) 216 { 217 enum mcc_base_status base_status; 218 enum mcc_addl_status addl_status; 219 struct be_cmd_resp_hdr *resp_hdr; 220 u8 opcode = 0, subsystem = 0; 221 222 /* Just swap the status to host endian; mcc tag is opaquely copied 223 * from mcc_wrb */ 224 be_dws_le_to_cpu(compl, 4); 225 226 base_status = base_status(compl->status); 227 addl_status = addl_status(compl->status); 228 229 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 230 if (resp_hdr) { 231 opcode = resp_hdr->opcode; 232 subsystem = resp_hdr->subsystem; 233 } 234 235 be_async_cmd_process(adapter, compl, resp_hdr); 236 237 if (base_status != MCC_STATUS_SUCCESS && 238 !be_skip_err_log(opcode, base_status, addl_status)) { 239 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 240 dev_warn(&adapter->pdev->dev, 241 "VF is not privileged to issue opcode %d-%d\n", 242 opcode, subsystem); 243 } else { 244 dev_err(&adapter->pdev->dev, 245 "opcode %d-%d failed:status %d-%d\n", 246 opcode, subsystem, base_status, addl_status); 247 } 248 } 249 return compl->status; 250 } 251 252 /* Link state evt is a string of bytes; no need for endian swapping */ 253 static void be_async_link_state_process(struct be_adapter *adapter, 254 struct be_mcc_compl *compl) 255 { 256 struct be_async_event_link_state *evt = 257 (struct be_async_event_link_state *)compl; 258 259 /* When link status changes, link speed must be re-queried from FW */ 260 adapter->phy.link_speed = -1; 261 262 /* On BEx the FW does not send a separate link status 263 * notification for physical and logical link. 264 * On other chips just process the logical link 265 * status notification 266 */ 267 if (!BEx_chip(adapter) && 268 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) 269 return; 270 271 /* For the initial link status do not rely on the ASYNC event as 272 * it may not be received in some cases. 273 */ 274 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 275 be_link_status_update(adapter, 276 evt->port_link_status & LINK_STATUS_MASK); 277 } 278 279 static void be_async_port_misconfig_event_process(struct be_adapter *adapter, 280 struct be_mcc_compl *compl) 281 { 282 struct be_async_event_misconfig_port *evt = 283 (struct be_async_event_misconfig_port *)compl; 284 u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1); 285 struct device *dev = &adapter->pdev->dev; 286 u8 port_misconfig_evt; 287 288 port_misconfig_evt = 289 ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff); 290 291 /* Log an error message that would allow a user to determine 292 * whether the SFPs have an issue 293 */ 294 dev_info(dev, "Port %c: %s %s", adapter->port_name, 295 be_port_misconfig_evt_desc[port_misconfig_evt], 296 be_port_misconfig_remedy_desc[port_misconfig_evt]); 297 298 if (port_misconfig_evt == INCOMPATIBLE_SFP) 299 adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP; 300 } 301 302 /* Grp5 CoS Priority evt */ 303 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 304 struct be_mcc_compl *compl) 305 { 306 struct be_async_event_grp5_cos_priority *evt = 307 (struct be_async_event_grp5_cos_priority *)compl; 308 309 if (evt->valid) { 310 adapter->vlan_prio_bmap = evt->available_priority_bmap; 311 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 312 adapter->recommended_prio = 313 evt->reco_default_priority << VLAN_PRIO_SHIFT; 314 } 315 } 316 317 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 318 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 319 struct be_mcc_compl *compl) 320 { 321 struct be_async_event_grp5_qos_link_speed *evt = 322 (struct be_async_event_grp5_qos_link_speed *)compl; 323 324 if (adapter->phy.link_speed >= 0 && 325 evt->physical_port == adapter->port_num) 326 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; 327 } 328 329 /*Grp5 PVID evt*/ 330 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 331 struct be_mcc_compl *compl) 332 { 333 struct be_async_event_grp5_pvid_state *evt = 334 (struct be_async_event_grp5_pvid_state *)compl; 335 336 if (evt->enabled) { 337 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 338 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); 339 } else { 340 adapter->pvid = 0; 341 } 342 } 343 344 #define MGMT_ENABLE_MASK 0x4 345 static void be_async_grp5_fw_control_process(struct be_adapter *adapter, 346 struct be_mcc_compl *compl) 347 { 348 struct be_async_fw_control *evt = (struct be_async_fw_control *)compl; 349 u32 evt_dw1 = le32_to_cpu(evt->event_data_word1); 350 351 if (evt_dw1 & MGMT_ENABLE_MASK) { 352 adapter->flags |= BE_FLAGS_OS2BMC; 353 adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2); 354 } else { 355 adapter->flags &= ~BE_FLAGS_OS2BMC; 356 } 357 } 358 359 static void be_async_grp5_evt_process(struct be_adapter *adapter, 360 struct be_mcc_compl *compl) 361 { 362 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) & 363 ASYNC_EVENT_TYPE_MASK; 364 365 switch (event_type) { 366 case ASYNC_EVENT_COS_PRIORITY: 367 be_async_grp5_cos_priority_process(adapter, compl); 368 break; 369 case ASYNC_EVENT_QOS_SPEED: 370 be_async_grp5_qos_speed_process(adapter, compl); 371 break; 372 case ASYNC_EVENT_PVID_STATE: 373 be_async_grp5_pvid_state_process(adapter, compl); 374 break; 375 /* Async event to disable/enable os2bmc and/or mac-learning */ 376 case ASYNC_EVENT_FW_CONTROL: 377 be_async_grp5_fw_control_process(adapter, compl); 378 break; 379 default: 380 break; 381 } 382 } 383 384 static void be_async_dbg_evt_process(struct be_adapter *adapter, 385 struct be_mcc_compl *cmp) 386 { 387 u8 event_type = 0; 388 struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp; 389 390 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & 391 ASYNC_EVENT_TYPE_MASK; 392 393 switch (event_type) { 394 case ASYNC_DEBUG_EVENT_TYPE_QNQ: 395 if (evt->valid) 396 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag); 397 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; 398 break; 399 default: 400 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n", 401 event_type); 402 break; 403 } 404 } 405 406 static void be_async_sliport_evt_process(struct be_adapter *adapter, 407 struct be_mcc_compl *cmp) 408 { 409 u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & 410 ASYNC_EVENT_TYPE_MASK; 411 412 if (event_type == ASYNC_EVENT_PORT_MISCONFIG) 413 be_async_port_misconfig_event_process(adapter, cmp); 414 } 415 416 static inline bool is_link_state_evt(u32 flags) 417 { 418 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 419 ASYNC_EVENT_CODE_LINK_STATE; 420 } 421 422 static inline bool is_grp5_evt(u32 flags) 423 { 424 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 425 ASYNC_EVENT_CODE_GRP_5; 426 } 427 428 static inline bool is_dbg_evt(u32 flags) 429 { 430 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 431 ASYNC_EVENT_CODE_QNQ; 432 } 433 434 static inline bool is_sliport_evt(u32 flags) 435 { 436 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 437 ASYNC_EVENT_CODE_SLIPORT; 438 } 439 440 static void be_mcc_event_process(struct be_adapter *adapter, 441 struct be_mcc_compl *compl) 442 { 443 if (is_link_state_evt(compl->flags)) 444 be_async_link_state_process(adapter, compl); 445 else if (is_grp5_evt(compl->flags)) 446 be_async_grp5_evt_process(adapter, compl); 447 else if (is_dbg_evt(compl->flags)) 448 be_async_dbg_evt_process(adapter, compl); 449 else if (is_sliport_evt(compl->flags)) 450 be_async_sliport_evt_process(adapter, compl); 451 } 452 453 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 454 { 455 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 456 struct be_mcc_compl *compl = queue_tail_node(mcc_cq); 457 458 if (be_mcc_compl_is_new(compl)) { 459 queue_tail_inc(mcc_cq); 460 return compl; 461 } 462 return NULL; 463 } 464 465 void be_async_mcc_enable(struct be_adapter *adapter) 466 { 467 spin_lock_bh(&adapter->mcc_cq_lock); 468 469 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); 470 adapter->mcc_obj.rearm_cq = true; 471 472 spin_unlock_bh(&adapter->mcc_cq_lock); 473 } 474 475 void be_async_mcc_disable(struct be_adapter *adapter) 476 { 477 spin_lock_bh(&adapter->mcc_cq_lock); 478 479 adapter->mcc_obj.rearm_cq = false; 480 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); 481 482 spin_unlock_bh(&adapter->mcc_cq_lock); 483 } 484 485 int be_process_mcc(struct be_adapter *adapter) 486 { 487 struct be_mcc_compl *compl; 488 int num = 0, status = 0; 489 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 490 491 spin_lock(&adapter->mcc_cq_lock); 492 493 while ((compl = be_mcc_compl_get(adapter))) { 494 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 495 be_mcc_event_process(adapter, compl); 496 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 497 status = be_mcc_compl_process(adapter, compl); 498 atomic_dec(&mcc_obj->q.used); 499 } 500 be_mcc_compl_use(compl); 501 num++; 502 } 503 504 if (num) 505 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 506 507 spin_unlock(&adapter->mcc_cq_lock); 508 return status; 509 } 510 511 /* Wait till no more pending mcc requests are present */ 512 static int be_mcc_wait_compl(struct be_adapter *adapter) 513 { 514 #define mcc_timeout 120000 /* 12s timeout */ 515 int i, status = 0; 516 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 517 518 for (i = 0; i < mcc_timeout; i++) { 519 if (be_check_error(adapter, BE_ERROR_ANY)) 520 return -EIO; 521 522 local_bh_disable(); 523 status = be_process_mcc(adapter); 524 local_bh_enable(); 525 526 if (atomic_read(&mcc_obj->q.used) == 0) 527 break; 528 udelay(100); 529 } 530 if (i == mcc_timeout) { 531 dev_err(&adapter->pdev->dev, "FW not responding\n"); 532 be_set_error(adapter, BE_ERROR_FW); 533 return -EIO; 534 } 535 return status; 536 } 537 538 /* Notify MCC requests and wait for completion */ 539 static int be_mcc_notify_wait(struct be_adapter *adapter) 540 { 541 int status; 542 struct be_mcc_wrb *wrb; 543 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 544 u16 index = mcc_obj->q.head; 545 struct be_cmd_resp_hdr *resp; 546 547 index_dec(&index, mcc_obj->q.len); 548 wrb = queue_index_node(&mcc_obj->q, index); 549 550 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); 551 552 status = be_mcc_notify(adapter); 553 if (status) 554 goto out; 555 556 status = be_mcc_wait_compl(adapter); 557 if (status == -EIO) 558 goto out; 559 560 status = (resp->base_status | 561 ((resp->addl_status & CQE_ADDL_STATUS_MASK) << 562 CQE_ADDL_STATUS_SHIFT)); 563 out: 564 return status; 565 } 566 567 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 568 { 569 int msecs = 0; 570 u32 ready; 571 572 do { 573 if (be_check_error(adapter, BE_ERROR_ANY)) 574 return -EIO; 575 576 ready = ioread32(db); 577 if (ready == 0xffffffff) 578 return -1; 579 580 ready &= MPU_MAILBOX_DB_RDY_MASK; 581 if (ready) 582 break; 583 584 if (msecs > 4000) { 585 dev_err(&adapter->pdev->dev, "FW not responding\n"); 586 be_set_error(adapter, BE_ERROR_FW); 587 be_detect_error(adapter); 588 return -1; 589 } 590 591 msleep(1); 592 msecs++; 593 } while (true); 594 595 return 0; 596 } 597 598 /* 599 * Insert the mailbox address into the doorbell in two steps 600 * Polls on the mbox doorbell till a command completion (or a timeout) occurs 601 */ 602 static int be_mbox_notify_wait(struct be_adapter *adapter) 603 { 604 int status; 605 u32 val = 0; 606 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; 607 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 608 struct be_mcc_mailbox *mbox = mbox_mem->va; 609 struct be_mcc_compl *compl = &mbox->compl; 610 611 /* wait for ready to be set */ 612 status = be_mbox_db_ready_wait(adapter, db); 613 if (status != 0) 614 return status; 615 616 val |= MPU_MAILBOX_DB_HI_MASK; 617 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 618 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 619 iowrite32(val, db); 620 621 /* wait for ready to be set */ 622 status = be_mbox_db_ready_wait(adapter, db); 623 if (status != 0) 624 return status; 625 626 val = 0; 627 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ 628 val |= (u32)(mbox_mem->dma >> 4) << 2; 629 iowrite32(val, db); 630 631 status = be_mbox_db_ready_wait(adapter, db); 632 if (status != 0) 633 return status; 634 635 /* A cq entry has been made now */ 636 if (be_mcc_compl_is_new(compl)) { 637 status = be_mcc_compl_process(adapter, &mbox->compl); 638 be_mcc_compl_use(compl); 639 if (status) 640 return status; 641 } else { 642 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); 643 return -1; 644 } 645 return 0; 646 } 647 648 static u16 be_POST_stage_get(struct be_adapter *adapter) 649 { 650 u32 sem; 651 652 if (BEx_chip(adapter)) 653 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); 654 else 655 pci_read_config_dword(adapter->pdev, 656 SLIPORT_SEMAPHORE_OFFSET_SH, &sem); 657 658 return sem & POST_STAGE_MASK; 659 } 660 661 static int lancer_wait_ready(struct be_adapter *adapter) 662 { 663 #define SLIPORT_READY_TIMEOUT 30 664 u32 sliport_status; 665 int i; 666 667 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { 668 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 669 if (sliport_status & SLIPORT_STATUS_RDY_MASK) 670 return 0; 671 672 if (sliport_status & SLIPORT_STATUS_ERR_MASK && 673 !(sliport_status & SLIPORT_STATUS_RN_MASK)) 674 return -EIO; 675 676 msleep(1000); 677 } 678 679 return sliport_status ? : -1; 680 } 681 682 int be_fw_wait_ready(struct be_adapter *adapter) 683 { 684 u16 stage; 685 int status, timeout = 0; 686 struct device *dev = &adapter->pdev->dev; 687 688 if (lancer_chip(adapter)) { 689 status = lancer_wait_ready(adapter); 690 if (status) { 691 stage = status; 692 goto err; 693 } 694 return 0; 695 } 696 697 do { 698 /* There's no means to poll POST state on BE2/3 VFs */ 699 if (BEx_chip(adapter) && be_virtfn(adapter)) 700 return 0; 701 702 stage = be_POST_stage_get(adapter); 703 if (stage == POST_STAGE_ARMFW_RDY) 704 return 0; 705 706 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout); 707 if (msleep_interruptible(2000)) { 708 dev_err(dev, "Waiting for POST aborted\n"); 709 return -EINTR; 710 } 711 timeout += 2; 712 } while (timeout < 60); 713 714 err: 715 dev_err(dev, "POST timeout; stage=%#x\n", stage); 716 return -ETIMEDOUT; 717 } 718 719 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) 720 { 721 return &wrb->payload.sgl[0]; 722 } 723 724 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr) 725 { 726 wrb->tag0 = addr & 0xFFFFFFFF; 727 wrb->tag1 = upper_32_bits(addr); 728 } 729 730 /* Don't touch the hdr after it's prepared */ 731 /* mem will be NULL for embedded commands */ 732 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 733 u8 subsystem, u8 opcode, int cmd_len, 734 struct be_mcc_wrb *wrb, 735 struct be_dma_mem *mem) 736 { 737 struct be_sge *sge; 738 739 req_hdr->opcode = opcode; 740 req_hdr->subsystem = subsystem; 741 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 742 req_hdr->version = 0; 743 fill_wrb_tags(wrb, (ulong) req_hdr); 744 wrb->payload_length = cmd_len; 745 if (mem) { 746 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 747 MCC_WRB_SGE_CNT_SHIFT; 748 sge = nonembedded_sgl(wrb); 749 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); 750 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); 751 sge->len = cpu_to_le32(mem->size); 752 } else 753 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 754 be_dws_cpu_to_le(wrb, 8); 755 } 756 757 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 758 struct be_dma_mem *mem) 759 { 760 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 761 u64 dma = (u64)mem->dma; 762 763 for (i = 0; i < buf_pages; i++) { 764 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); 765 pages[i].hi = cpu_to_le32(upper_32_bits(dma)); 766 dma += PAGE_SIZE_4K; 767 } 768 } 769 770 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 771 { 772 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 773 struct be_mcc_wrb *wrb 774 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 775 memset(wrb, 0, sizeof(*wrb)); 776 return wrb; 777 } 778 779 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) 780 { 781 struct be_queue_info *mccq = &adapter->mcc_obj.q; 782 struct be_mcc_wrb *wrb; 783 784 if (!mccq->created) 785 return NULL; 786 787 if (atomic_read(&mccq->used) >= mccq->len) 788 return NULL; 789 790 wrb = queue_head_node(mccq); 791 queue_head_inc(mccq); 792 atomic_inc(&mccq->used); 793 memset(wrb, 0, sizeof(*wrb)); 794 return wrb; 795 } 796 797 static bool use_mcc(struct be_adapter *adapter) 798 { 799 return adapter->mcc_obj.q.created; 800 } 801 802 /* Must be used only in process context */ 803 static int be_cmd_lock(struct be_adapter *adapter) 804 { 805 if (use_mcc(adapter)) { 806 spin_lock_bh(&adapter->mcc_lock); 807 return 0; 808 } else { 809 return mutex_lock_interruptible(&adapter->mbox_lock); 810 } 811 } 812 813 /* Must be used only in process context */ 814 static void be_cmd_unlock(struct be_adapter *adapter) 815 { 816 if (use_mcc(adapter)) 817 spin_unlock_bh(&adapter->mcc_lock); 818 else 819 return mutex_unlock(&adapter->mbox_lock); 820 } 821 822 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter, 823 struct be_mcc_wrb *wrb) 824 { 825 struct be_mcc_wrb *dest_wrb; 826 827 if (use_mcc(adapter)) { 828 dest_wrb = wrb_from_mccq(adapter); 829 if (!dest_wrb) 830 return NULL; 831 } else { 832 dest_wrb = wrb_from_mbox(adapter); 833 } 834 835 memcpy(dest_wrb, wrb, sizeof(*wrb)); 836 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK)) 837 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb)); 838 839 return dest_wrb; 840 } 841 842 /* Must be used only in process context */ 843 static int be_cmd_notify_wait(struct be_adapter *adapter, 844 struct be_mcc_wrb *wrb) 845 { 846 struct be_mcc_wrb *dest_wrb; 847 int status; 848 849 status = be_cmd_lock(adapter); 850 if (status) 851 return status; 852 853 dest_wrb = be_cmd_copy(adapter, wrb); 854 if (!dest_wrb) 855 return -EBUSY; 856 857 if (use_mcc(adapter)) 858 status = be_mcc_notify_wait(adapter); 859 else 860 status = be_mbox_notify_wait(adapter); 861 862 if (!status) 863 memcpy(wrb, dest_wrb, sizeof(*wrb)); 864 865 be_cmd_unlock(adapter); 866 return status; 867 } 868 869 /* Tell fw we're about to start firing cmds by writing a 870 * special pattern across the wrb hdr; uses mbox 871 */ 872 int be_cmd_fw_init(struct be_adapter *adapter) 873 { 874 u8 *wrb; 875 int status; 876 877 if (lancer_chip(adapter)) 878 return 0; 879 880 if (mutex_lock_interruptible(&adapter->mbox_lock)) 881 return -1; 882 883 wrb = (u8 *)wrb_from_mbox(adapter); 884 *wrb++ = 0xFF; 885 *wrb++ = 0x12; 886 *wrb++ = 0x34; 887 *wrb++ = 0xFF; 888 *wrb++ = 0xFF; 889 *wrb++ = 0x56; 890 *wrb++ = 0x78; 891 *wrb = 0xFF; 892 893 status = be_mbox_notify_wait(adapter); 894 895 mutex_unlock(&adapter->mbox_lock); 896 return status; 897 } 898 899 /* Tell fw we're done with firing cmds by writing a 900 * special pattern across the wrb hdr; uses mbox 901 */ 902 int be_cmd_fw_clean(struct be_adapter *adapter) 903 { 904 u8 *wrb; 905 int status; 906 907 if (lancer_chip(adapter)) 908 return 0; 909 910 if (mutex_lock_interruptible(&adapter->mbox_lock)) 911 return -1; 912 913 wrb = (u8 *)wrb_from_mbox(adapter); 914 *wrb++ = 0xFF; 915 *wrb++ = 0xAA; 916 *wrb++ = 0xBB; 917 *wrb++ = 0xFF; 918 *wrb++ = 0xFF; 919 *wrb++ = 0xCC; 920 *wrb++ = 0xDD; 921 *wrb = 0xFF; 922 923 status = be_mbox_notify_wait(adapter); 924 925 mutex_unlock(&adapter->mbox_lock); 926 return status; 927 } 928 929 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) 930 { 931 struct be_mcc_wrb *wrb; 932 struct be_cmd_req_eq_create *req; 933 struct be_dma_mem *q_mem = &eqo->q.dma_mem; 934 int status, ver = 0; 935 936 if (mutex_lock_interruptible(&adapter->mbox_lock)) 937 return -1; 938 939 wrb = wrb_from_mbox(adapter); 940 req = embedded_payload(wrb); 941 942 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 943 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, 944 NULL); 945 946 /* Support for EQ_CREATEv2 available only SH-R onwards */ 947 if (!(BEx_chip(adapter) || lancer_chip(adapter))) 948 ver = 2; 949 950 req->hdr.version = ver; 951 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 952 953 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 954 /* 4byte eqe*/ 955 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 956 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 957 __ilog2_u32(eqo->q.len / 256)); 958 be_dws_cpu_to_le(req->context, sizeof(req->context)); 959 960 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 961 962 status = be_mbox_notify_wait(adapter); 963 if (!status) { 964 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 965 966 eqo->q.id = le16_to_cpu(resp->eq_id); 967 eqo->msix_idx = 968 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx; 969 eqo->q.created = true; 970 } 971 972 mutex_unlock(&adapter->mbox_lock); 973 return status; 974 } 975 976 /* Use MCC */ 977 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 978 bool permanent, u32 if_handle, u32 pmac_id) 979 { 980 struct be_mcc_wrb *wrb; 981 struct be_cmd_req_mac_query *req; 982 int status; 983 984 spin_lock_bh(&adapter->mcc_lock); 985 986 wrb = wrb_from_mccq(adapter); 987 if (!wrb) { 988 status = -EBUSY; 989 goto err; 990 } 991 req = embedded_payload(wrb); 992 993 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 994 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, 995 NULL); 996 req->type = MAC_ADDRESS_TYPE_NETWORK; 997 if (permanent) { 998 req->permanent = 1; 999 } else { 1000 req->if_id = cpu_to_le16((u16)if_handle); 1001 req->pmac_id = cpu_to_le32(pmac_id); 1002 req->permanent = 0; 1003 } 1004 1005 status = be_mcc_notify_wait(adapter); 1006 if (!status) { 1007 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 1008 1009 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 1010 } 1011 1012 err: 1013 spin_unlock_bh(&adapter->mcc_lock); 1014 return status; 1015 } 1016 1017 /* Uses synchronous MCCQ */ 1018 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1019 u32 if_id, u32 *pmac_id, u32 domain) 1020 { 1021 struct be_mcc_wrb *wrb; 1022 struct be_cmd_req_pmac_add *req; 1023 int status; 1024 1025 spin_lock_bh(&adapter->mcc_lock); 1026 1027 wrb = wrb_from_mccq(adapter); 1028 if (!wrb) { 1029 status = -EBUSY; 1030 goto err; 1031 } 1032 req = embedded_payload(wrb); 1033 1034 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1035 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, 1036 NULL); 1037 1038 req->hdr.domain = domain; 1039 req->if_id = cpu_to_le32(if_id); 1040 memcpy(req->mac_address, mac_addr, ETH_ALEN); 1041 1042 status = be_mcc_notify_wait(adapter); 1043 if (!status) { 1044 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); 1045 1046 *pmac_id = le32_to_cpu(resp->pmac_id); 1047 } 1048 1049 err: 1050 spin_unlock_bh(&adapter->mcc_lock); 1051 1052 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 1053 status = -EPERM; 1054 1055 return status; 1056 } 1057 1058 /* Uses synchronous MCCQ */ 1059 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) 1060 { 1061 struct be_mcc_wrb *wrb; 1062 struct be_cmd_req_pmac_del *req; 1063 int status; 1064 1065 if (pmac_id == -1) 1066 return 0; 1067 1068 spin_lock_bh(&adapter->mcc_lock); 1069 1070 wrb = wrb_from_mccq(adapter); 1071 if (!wrb) { 1072 status = -EBUSY; 1073 goto err; 1074 } 1075 req = embedded_payload(wrb); 1076 1077 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1078 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), 1079 wrb, NULL); 1080 1081 req->hdr.domain = dom; 1082 req->if_id = cpu_to_le32(if_id); 1083 req->pmac_id = cpu_to_le32(pmac_id); 1084 1085 status = be_mcc_notify_wait(adapter); 1086 1087 err: 1088 spin_unlock_bh(&adapter->mcc_lock); 1089 return status; 1090 } 1091 1092 /* Uses Mbox */ 1093 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1094 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1095 { 1096 struct be_mcc_wrb *wrb; 1097 struct be_cmd_req_cq_create *req; 1098 struct be_dma_mem *q_mem = &cq->dma_mem; 1099 void *ctxt; 1100 int status; 1101 1102 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1103 return -1; 1104 1105 wrb = wrb_from_mbox(adapter); 1106 req = embedded_payload(wrb); 1107 ctxt = &req->context; 1108 1109 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1110 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, 1111 NULL); 1112 1113 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1114 1115 if (BEx_chip(adapter)) { 1116 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1117 coalesce_wm); 1118 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1119 ctxt, no_delay); 1120 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1121 __ilog2_u32(cq->len / 256)); 1122 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1123 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1124 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 1125 } else { 1126 req->hdr.version = 2; 1127 req->page_size = 1; /* 1 for 4K */ 1128 1129 /* coalesce-wm field in this cmd is not relevant to Lancer. 1130 * Lancer uses COMMON_MODIFY_CQ to set this field 1131 */ 1132 if (!lancer_chip(adapter)) 1133 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1134 ctxt, coalesce_wm); 1135 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1136 no_delay); 1137 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1138 __ilog2_u32(cq->len / 256)); 1139 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1140 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); 1141 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); 1142 } 1143 1144 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1145 1146 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1147 1148 status = be_mbox_notify_wait(adapter); 1149 if (!status) { 1150 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 1151 1152 cq->id = le16_to_cpu(resp->cq_id); 1153 cq->created = true; 1154 } 1155 1156 mutex_unlock(&adapter->mbox_lock); 1157 1158 return status; 1159 } 1160 1161 static u32 be_encoded_q_len(int q_len) 1162 { 1163 u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 1164 1165 if (len_encoded == 16) 1166 len_encoded = 0; 1167 return len_encoded; 1168 } 1169 1170 static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1171 struct be_queue_info *mccq, 1172 struct be_queue_info *cq) 1173 { 1174 struct be_mcc_wrb *wrb; 1175 struct be_cmd_req_mcc_ext_create *req; 1176 struct be_dma_mem *q_mem = &mccq->dma_mem; 1177 void *ctxt; 1178 int status; 1179 1180 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1181 return -1; 1182 1183 wrb = wrb_from_mbox(adapter); 1184 req = embedded_payload(wrb); 1185 ctxt = &req->context; 1186 1187 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1188 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, 1189 NULL); 1190 1191 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1192 if (BEx_chip(adapter)) { 1193 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1194 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1195 be_encoded_q_len(mccq->len)); 1196 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1197 } else { 1198 req->hdr.version = 1; 1199 req->cq_id = cpu_to_le16(cq->id); 1200 1201 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, 1202 be_encoded_q_len(mccq->len)); 1203 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); 1204 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, 1205 ctxt, cq->id); 1206 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, 1207 ctxt, 1); 1208 } 1209 1210 /* Subscribe to Link State, Sliport Event and Group 5 Events 1211 * (bits 1, 5 and 17 set) 1212 */ 1213 req->async_event_bitmap[0] = 1214 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) | 1215 BIT(ASYNC_EVENT_CODE_GRP_5) | 1216 BIT(ASYNC_EVENT_CODE_QNQ) | 1217 BIT(ASYNC_EVENT_CODE_SLIPORT)); 1218 1219 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1220 1221 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1222 1223 status = be_mbox_notify_wait(adapter); 1224 if (!status) { 1225 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1226 1227 mccq->id = le16_to_cpu(resp->id); 1228 mccq->created = true; 1229 } 1230 mutex_unlock(&adapter->mbox_lock); 1231 1232 return status; 1233 } 1234 1235 static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1236 struct be_queue_info *mccq, 1237 struct be_queue_info *cq) 1238 { 1239 struct be_mcc_wrb *wrb; 1240 struct be_cmd_req_mcc_create *req; 1241 struct be_dma_mem *q_mem = &mccq->dma_mem; 1242 void *ctxt; 1243 int status; 1244 1245 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1246 return -1; 1247 1248 wrb = wrb_from_mbox(adapter); 1249 req = embedded_payload(wrb); 1250 ctxt = &req->context; 1251 1252 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1253 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, 1254 NULL); 1255 1256 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1257 1258 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1259 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1260 be_encoded_q_len(mccq->len)); 1261 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1262 1263 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1264 1265 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1266 1267 status = be_mbox_notify_wait(adapter); 1268 if (!status) { 1269 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1270 1271 mccq->id = le16_to_cpu(resp->id); 1272 mccq->created = true; 1273 } 1274 1275 mutex_unlock(&adapter->mbox_lock); 1276 return status; 1277 } 1278 1279 int be_cmd_mccq_create(struct be_adapter *adapter, 1280 struct be_queue_info *mccq, struct be_queue_info *cq) 1281 { 1282 int status; 1283 1284 status = be_cmd_mccq_ext_create(adapter, mccq, cq); 1285 if (status && BEx_chip(adapter)) { 1286 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " 1287 "or newer to avoid conflicting priorities between NIC " 1288 "and FCoE traffic"); 1289 status = be_cmd_mccq_org_create(adapter, mccq, cq); 1290 } 1291 return status; 1292 } 1293 1294 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) 1295 { 1296 struct be_mcc_wrb wrb = {0}; 1297 struct be_cmd_req_eth_tx_create *req; 1298 struct be_queue_info *txq = &txo->q; 1299 struct be_queue_info *cq = &txo->cq; 1300 struct be_dma_mem *q_mem = &txq->dma_mem; 1301 int status, ver = 0; 1302 1303 req = embedded_payload(&wrb); 1304 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1305 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1306 1307 if (lancer_chip(adapter)) { 1308 req->hdr.version = 1; 1309 } else if (BEx_chip(adapter)) { 1310 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1311 req->hdr.version = 2; 1312 } else { /* For SH */ 1313 req->hdr.version = 2; 1314 } 1315 1316 if (req->hdr.version > 0) 1317 req->if_id = cpu_to_le16(adapter->if_handle); 1318 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1319 req->ulp_num = BE_ULP1_NUM; 1320 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1321 req->cq_id = cpu_to_le16(cq->id); 1322 req->queue_size = be_encoded_q_len(txq->len); 1323 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1324 ver = req->hdr.version; 1325 1326 status = be_cmd_notify_wait(adapter, &wrb); 1327 if (!status) { 1328 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); 1329 1330 txq->id = le16_to_cpu(resp->cid); 1331 if (ver == 2) 1332 txo->db_offset = le32_to_cpu(resp->db_offset); 1333 else 1334 txo->db_offset = DB_TXULP1_OFFSET; 1335 txq->created = true; 1336 } 1337 1338 return status; 1339 } 1340 1341 /* Uses MCC */ 1342 int be_cmd_rxq_create(struct be_adapter *adapter, 1343 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1344 u32 if_id, u32 rss, u8 *rss_id) 1345 { 1346 struct be_mcc_wrb *wrb; 1347 struct be_cmd_req_eth_rx_create *req; 1348 struct be_dma_mem *q_mem = &rxq->dma_mem; 1349 int status; 1350 1351 spin_lock_bh(&adapter->mcc_lock); 1352 1353 wrb = wrb_from_mccq(adapter); 1354 if (!wrb) { 1355 status = -EBUSY; 1356 goto err; 1357 } 1358 req = embedded_payload(wrb); 1359 1360 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1361 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1362 1363 req->cq_id = cpu_to_le16(cq_id); 1364 req->frag_size = fls(frag_size) - 1; 1365 req->num_pages = 2; 1366 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1367 req->interface_id = cpu_to_le32(if_id); 1368 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); 1369 req->rss_queue = cpu_to_le32(rss); 1370 1371 status = be_mcc_notify_wait(adapter); 1372 if (!status) { 1373 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 1374 1375 rxq->id = le16_to_cpu(resp->id); 1376 rxq->created = true; 1377 *rss_id = resp->rss_id; 1378 } 1379 1380 err: 1381 spin_unlock_bh(&adapter->mcc_lock); 1382 return status; 1383 } 1384 1385 /* Generic destroyer function for all types of queues 1386 * Uses Mbox 1387 */ 1388 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1389 int queue_type) 1390 { 1391 struct be_mcc_wrb *wrb; 1392 struct be_cmd_req_q_destroy *req; 1393 u8 subsys = 0, opcode = 0; 1394 int status; 1395 1396 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1397 return -1; 1398 1399 wrb = wrb_from_mbox(adapter); 1400 req = embedded_payload(wrb); 1401 1402 switch (queue_type) { 1403 case QTYPE_EQ: 1404 subsys = CMD_SUBSYSTEM_COMMON; 1405 opcode = OPCODE_COMMON_EQ_DESTROY; 1406 break; 1407 case QTYPE_CQ: 1408 subsys = CMD_SUBSYSTEM_COMMON; 1409 opcode = OPCODE_COMMON_CQ_DESTROY; 1410 break; 1411 case QTYPE_TXQ: 1412 subsys = CMD_SUBSYSTEM_ETH; 1413 opcode = OPCODE_ETH_TX_DESTROY; 1414 break; 1415 case QTYPE_RXQ: 1416 subsys = CMD_SUBSYSTEM_ETH; 1417 opcode = OPCODE_ETH_RX_DESTROY; 1418 break; 1419 case QTYPE_MCCQ: 1420 subsys = CMD_SUBSYSTEM_COMMON; 1421 opcode = OPCODE_COMMON_MCC_DESTROY; 1422 break; 1423 default: 1424 BUG(); 1425 } 1426 1427 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1428 NULL); 1429 req->id = cpu_to_le16(q->id); 1430 1431 status = be_mbox_notify_wait(adapter); 1432 q->created = false; 1433 1434 mutex_unlock(&adapter->mbox_lock); 1435 return status; 1436 } 1437 1438 /* Uses MCC */ 1439 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) 1440 { 1441 struct be_mcc_wrb *wrb; 1442 struct be_cmd_req_q_destroy *req; 1443 int status; 1444 1445 spin_lock_bh(&adapter->mcc_lock); 1446 1447 wrb = wrb_from_mccq(adapter); 1448 if (!wrb) { 1449 status = -EBUSY; 1450 goto err; 1451 } 1452 req = embedded_payload(wrb); 1453 1454 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1455 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1456 req->id = cpu_to_le16(q->id); 1457 1458 status = be_mcc_notify_wait(adapter); 1459 q->created = false; 1460 1461 err: 1462 spin_unlock_bh(&adapter->mcc_lock); 1463 return status; 1464 } 1465 1466 /* Create an rx filtering policy configuration on an i/f 1467 * Will use MBOX only if MCCQ has not been created. 1468 */ 1469 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1470 u32 *if_handle, u32 domain) 1471 { 1472 struct be_mcc_wrb wrb = {0}; 1473 struct be_cmd_req_if_create *req; 1474 int status; 1475 1476 req = embedded_payload(&wrb); 1477 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1478 OPCODE_COMMON_NTWK_INTERFACE_CREATE, 1479 sizeof(*req), &wrb, NULL); 1480 req->hdr.domain = domain; 1481 req->capability_flags = cpu_to_le32(cap_flags); 1482 req->enable_flags = cpu_to_le32(en_flags); 1483 req->pmac_invalid = true; 1484 1485 status = be_cmd_notify_wait(adapter, &wrb); 1486 if (!status) { 1487 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); 1488 1489 *if_handle = le32_to_cpu(resp->interface_id); 1490 1491 /* Hack to retrieve VF's pmac-id on BE3 */ 1492 if (BE3_chip(adapter) && be_virtfn(adapter)) 1493 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); 1494 } 1495 return status; 1496 } 1497 1498 /* Uses MCCQ */ 1499 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) 1500 { 1501 struct be_mcc_wrb *wrb; 1502 struct be_cmd_req_if_destroy *req; 1503 int status; 1504 1505 if (interface_id == -1) 1506 return 0; 1507 1508 spin_lock_bh(&adapter->mcc_lock); 1509 1510 wrb = wrb_from_mccq(adapter); 1511 if (!wrb) { 1512 status = -EBUSY; 1513 goto err; 1514 } 1515 req = embedded_payload(wrb); 1516 1517 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1518 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, 1519 sizeof(*req), wrb, NULL); 1520 req->hdr.domain = domain; 1521 req->interface_id = cpu_to_le32(interface_id); 1522 1523 status = be_mcc_notify_wait(adapter); 1524 err: 1525 spin_unlock_bh(&adapter->mcc_lock); 1526 return status; 1527 } 1528 1529 /* Get stats is a non embedded command: the request is not embedded inside 1530 * WRB but is a separate dma memory block 1531 * Uses asynchronous MCC 1532 */ 1533 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1534 { 1535 struct be_mcc_wrb *wrb; 1536 struct be_cmd_req_hdr *hdr; 1537 int status = 0; 1538 1539 spin_lock_bh(&adapter->mcc_lock); 1540 1541 wrb = wrb_from_mccq(adapter); 1542 if (!wrb) { 1543 status = -EBUSY; 1544 goto err; 1545 } 1546 hdr = nonemb_cmd->va; 1547 1548 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1549 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, 1550 nonemb_cmd); 1551 1552 /* version 1 of the cmd is not supported only by BE2 */ 1553 if (BE2_chip(adapter)) 1554 hdr->version = 0; 1555 if (BE3_chip(adapter) || lancer_chip(adapter)) 1556 hdr->version = 1; 1557 else 1558 hdr->version = 2; 1559 1560 status = be_mcc_notify(adapter); 1561 if (status) 1562 goto err; 1563 1564 adapter->stats_cmd_sent = true; 1565 1566 err: 1567 spin_unlock_bh(&adapter->mcc_lock); 1568 return status; 1569 } 1570 1571 /* Lancer Stats */ 1572 int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1573 struct be_dma_mem *nonemb_cmd) 1574 { 1575 struct be_mcc_wrb *wrb; 1576 struct lancer_cmd_req_pport_stats *req; 1577 int status = 0; 1578 1579 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS, 1580 CMD_SUBSYSTEM_ETH)) 1581 return -EPERM; 1582 1583 spin_lock_bh(&adapter->mcc_lock); 1584 1585 wrb = wrb_from_mccq(adapter); 1586 if (!wrb) { 1587 status = -EBUSY; 1588 goto err; 1589 } 1590 req = nonemb_cmd->va; 1591 1592 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1593 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, 1594 wrb, nonemb_cmd); 1595 1596 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1597 req->cmd_params.params.reset_stats = 0; 1598 1599 status = be_mcc_notify(adapter); 1600 if (status) 1601 goto err; 1602 1603 adapter->stats_cmd_sent = true; 1604 1605 err: 1606 spin_unlock_bh(&adapter->mcc_lock); 1607 return status; 1608 } 1609 1610 static int be_mac_to_link_speed(int mac_speed) 1611 { 1612 switch (mac_speed) { 1613 case PHY_LINK_SPEED_ZERO: 1614 return 0; 1615 case PHY_LINK_SPEED_10MBPS: 1616 return 10; 1617 case PHY_LINK_SPEED_100MBPS: 1618 return 100; 1619 case PHY_LINK_SPEED_1GBPS: 1620 return 1000; 1621 case PHY_LINK_SPEED_10GBPS: 1622 return 10000; 1623 case PHY_LINK_SPEED_20GBPS: 1624 return 20000; 1625 case PHY_LINK_SPEED_25GBPS: 1626 return 25000; 1627 case PHY_LINK_SPEED_40GBPS: 1628 return 40000; 1629 } 1630 return 0; 1631 } 1632 1633 /* Uses synchronous mcc 1634 * Returns link_speed in Mbps 1635 */ 1636 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, 1637 u8 *link_status, u32 dom) 1638 { 1639 struct be_mcc_wrb *wrb; 1640 struct be_cmd_req_link_status *req; 1641 int status; 1642 1643 spin_lock_bh(&adapter->mcc_lock); 1644 1645 if (link_status) 1646 *link_status = LINK_DOWN; 1647 1648 wrb = wrb_from_mccq(adapter); 1649 if (!wrb) { 1650 status = -EBUSY; 1651 goto err; 1652 } 1653 req = embedded_payload(wrb); 1654 1655 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1656 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, 1657 sizeof(*req), wrb, NULL); 1658 1659 /* version 1 of the cmd is not supported only by BE2 */ 1660 if (!BE2_chip(adapter)) 1661 req->hdr.version = 1; 1662 1663 req->hdr.domain = dom; 1664 1665 status = be_mcc_notify_wait(adapter); 1666 if (!status) { 1667 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1668 1669 if (link_speed) { 1670 *link_speed = resp->link_speed ? 1671 le16_to_cpu(resp->link_speed) * 10 : 1672 be_mac_to_link_speed(resp->mac_speed); 1673 1674 if (!resp->logical_link_status) 1675 *link_speed = 0; 1676 } 1677 if (link_status) 1678 *link_status = resp->logical_link_status; 1679 } 1680 1681 err: 1682 spin_unlock_bh(&adapter->mcc_lock); 1683 return status; 1684 } 1685 1686 /* Uses synchronous mcc */ 1687 int be_cmd_get_die_temperature(struct be_adapter *adapter) 1688 { 1689 struct be_mcc_wrb *wrb; 1690 struct be_cmd_req_get_cntl_addnl_attribs *req; 1691 int status = 0; 1692 1693 spin_lock_bh(&adapter->mcc_lock); 1694 1695 wrb = wrb_from_mccq(adapter); 1696 if (!wrb) { 1697 status = -EBUSY; 1698 goto err; 1699 } 1700 req = embedded_payload(wrb); 1701 1702 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1703 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, 1704 sizeof(*req), wrb, NULL); 1705 1706 status = be_mcc_notify(adapter); 1707 err: 1708 spin_unlock_bh(&adapter->mcc_lock); 1709 return status; 1710 } 1711 1712 /* Uses synchronous mcc */ 1713 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) 1714 { 1715 struct be_mcc_wrb *wrb; 1716 struct be_cmd_req_get_fat *req; 1717 int status; 1718 1719 spin_lock_bh(&adapter->mcc_lock); 1720 1721 wrb = wrb_from_mccq(adapter); 1722 if (!wrb) { 1723 status = -EBUSY; 1724 goto err; 1725 } 1726 req = embedded_payload(wrb); 1727 1728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1729 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, 1730 NULL); 1731 req->fat_operation = cpu_to_le32(QUERY_FAT); 1732 status = be_mcc_notify_wait(adapter); 1733 if (!status) { 1734 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); 1735 1736 if (log_size && resp->log_size) 1737 *log_size = le32_to_cpu(resp->log_size) - 1738 sizeof(u32); 1739 } 1740 err: 1741 spin_unlock_bh(&adapter->mcc_lock); 1742 return status; 1743 } 1744 1745 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) 1746 { 1747 struct be_dma_mem get_fat_cmd; 1748 struct be_mcc_wrb *wrb; 1749 struct be_cmd_req_get_fat *req; 1750 u32 offset = 0, total_size, buf_size, 1751 log_offset = sizeof(u32), payload_len; 1752 int status = 0; 1753 1754 if (buf_len == 0) 1755 return -EIO; 1756 1757 total_size = buf_len; 1758 1759 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1760 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1761 get_fat_cmd.size, 1762 &get_fat_cmd.dma, GFP_ATOMIC); 1763 if (!get_fat_cmd.va) { 1764 dev_err(&adapter->pdev->dev, 1765 "Memory allocation failure while reading FAT data\n"); 1766 return -ENOMEM; 1767 } 1768 1769 spin_lock_bh(&adapter->mcc_lock); 1770 1771 while (total_size) { 1772 buf_size = min(total_size, (u32)60*1024); 1773 total_size -= buf_size; 1774 1775 wrb = wrb_from_mccq(adapter); 1776 if (!wrb) { 1777 status = -EBUSY; 1778 goto err; 1779 } 1780 req = get_fat_cmd.va; 1781 1782 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1783 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1784 OPCODE_COMMON_MANAGE_FAT, payload_len, 1785 wrb, &get_fat_cmd); 1786 1787 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1788 req->read_log_offset = cpu_to_le32(log_offset); 1789 req->read_log_length = cpu_to_le32(buf_size); 1790 req->data_buffer_size = cpu_to_le32(buf_size); 1791 1792 status = be_mcc_notify_wait(adapter); 1793 if (!status) { 1794 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1795 1796 memcpy(buf + offset, 1797 resp->data_buffer, 1798 le32_to_cpu(resp->read_log_length)); 1799 } else { 1800 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1801 goto err; 1802 } 1803 offset += buf_size; 1804 log_offset += buf_size; 1805 } 1806 err: 1807 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, 1808 get_fat_cmd.va, get_fat_cmd.dma); 1809 spin_unlock_bh(&adapter->mcc_lock); 1810 return status; 1811 } 1812 1813 /* Uses synchronous mcc */ 1814 int be_cmd_get_fw_ver(struct be_adapter *adapter) 1815 { 1816 struct be_mcc_wrb *wrb; 1817 struct be_cmd_req_get_fw_version *req; 1818 int status; 1819 1820 spin_lock_bh(&adapter->mcc_lock); 1821 1822 wrb = wrb_from_mccq(adapter); 1823 if (!wrb) { 1824 status = -EBUSY; 1825 goto err; 1826 } 1827 1828 req = embedded_payload(wrb); 1829 1830 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1831 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, 1832 NULL); 1833 status = be_mcc_notify_wait(adapter); 1834 if (!status) { 1835 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1836 1837 strlcpy(adapter->fw_ver, resp->firmware_version_string, 1838 sizeof(adapter->fw_ver)); 1839 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string, 1840 sizeof(adapter->fw_on_flash)); 1841 } 1842 err: 1843 spin_unlock_bh(&adapter->mcc_lock); 1844 return status; 1845 } 1846 1847 /* set the EQ delay interval of an EQ to specified value 1848 * Uses async mcc 1849 */ 1850 static int __be_cmd_modify_eqd(struct be_adapter *adapter, 1851 struct be_set_eqd *set_eqd, int num) 1852 { 1853 struct be_mcc_wrb *wrb; 1854 struct be_cmd_req_modify_eq_delay *req; 1855 int status = 0, i; 1856 1857 spin_lock_bh(&adapter->mcc_lock); 1858 1859 wrb = wrb_from_mccq(adapter); 1860 if (!wrb) { 1861 status = -EBUSY; 1862 goto err; 1863 } 1864 req = embedded_payload(wrb); 1865 1866 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1867 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, 1868 NULL); 1869 1870 req->num_eq = cpu_to_le32(num); 1871 for (i = 0; i < num; i++) { 1872 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); 1873 req->set_eqd[i].phase = 0; 1874 req->set_eqd[i].delay_multiplier = 1875 cpu_to_le32(set_eqd[i].delay_multiplier); 1876 } 1877 1878 status = be_mcc_notify(adapter); 1879 err: 1880 spin_unlock_bh(&adapter->mcc_lock); 1881 return status; 1882 } 1883 1884 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, 1885 int num) 1886 { 1887 int num_eqs, i = 0; 1888 1889 while (num) { 1890 num_eqs = min(num, 8); 1891 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1892 i += num_eqs; 1893 num -= num_eqs; 1894 } 1895 1896 return 0; 1897 } 1898 1899 /* Uses sycnhronous mcc */ 1900 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1901 u32 num, u32 domain) 1902 { 1903 struct be_mcc_wrb *wrb; 1904 struct be_cmd_req_vlan_config *req; 1905 int status; 1906 1907 spin_lock_bh(&adapter->mcc_lock); 1908 1909 wrb = wrb_from_mccq(adapter); 1910 if (!wrb) { 1911 status = -EBUSY; 1912 goto err; 1913 } 1914 req = embedded_payload(wrb); 1915 1916 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1917 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1918 wrb, NULL); 1919 req->hdr.domain = domain; 1920 1921 req->interface_id = if_id; 1922 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1923 req->num_vlan = num; 1924 memcpy(req->normal_vlan, vtag_array, 1925 req->num_vlan * sizeof(vtag_array[0])); 1926 1927 status = be_mcc_notify_wait(adapter); 1928 err: 1929 spin_unlock_bh(&adapter->mcc_lock); 1930 return status; 1931 } 1932 1933 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) 1934 { 1935 struct be_mcc_wrb *wrb; 1936 struct be_dma_mem *mem = &adapter->rx_filter; 1937 struct be_cmd_req_rx_filter *req = mem->va; 1938 int status; 1939 1940 spin_lock_bh(&adapter->mcc_lock); 1941 1942 wrb = wrb_from_mccq(adapter); 1943 if (!wrb) { 1944 status = -EBUSY; 1945 goto err; 1946 } 1947 memset(req, 0, sizeof(*req)); 1948 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1949 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1950 wrb, mem); 1951 1952 req->if_id = cpu_to_le32(adapter->if_handle); 1953 req->if_flags_mask = cpu_to_le32(flags); 1954 req->if_flags = (value == ON) ? req->if_flags_mask : 0; 1955 1956 if (flags & BE_IF_FLAGS_MULTICAST) { 1957 struct netdev_hw_addr *ha; 1958 int i = 0; 1959 1960 /* Reset mcast promisc mode if already set by setting mask 1961 * and not setting flags field 1962 */ 1963 req->if_flags_mask |= 1964 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & 1965 be_if_cap_flags(adapter)); 1966 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1967 netdev_for_each_mc_addr(ha, adapter->netdev) 1968 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1969 } 1970 1971 status = be_mcc_notify_wait(adapter); 1972 err: 1973 spin_unlock_bh(&adapter->mcc_lock); 1974 return status; 1975 } 1976 1977 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) 1978 { 1979 struct device *dev = &adapter->pdev->dev; 1980 1981 if ((flags & be_if_cap_flags(adapter)) != flags) { 1982 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags); 1983 dev_warn(dev, "Interface is capable of 0x%x flags only\n", 1984 be_if_cap_flags(adapter)); 1985 } 1986 flags &= be_if_cap_flags(adapter); 1987 1988 return __be_cmd_rx_filter(adapter, flags, value); 1989 } 1990 1991 /* Uses synchrounous mcc */ 1992 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) 1993 { 1994 struct be_mcc_wrb *wrb; 1995 struct be_cmd_req_set_flow_control *req; 1996 int status; 1997 1998 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL, 1999 CMD_SUBSYSTEM_COMMON)) 2000 return -EPERM; 2001 2002 spin_lock_bh(&adapter->mcc_lock); 2003 2004 wrb = wrb_from_mccq(adapter); 2005 if (!wrb) { 2006 status = -EBUSY; 2007 goto err; 2008 } 2009 req = embedded_payload(wrb); 2010 2011 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2012 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), 2013 wrb, NULL); 2014 2015 req->hdr.version = 1; 2016 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 2017 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 2018 2019 status = be_mcc_notify_wait(adapter); 2020 2021 err: 2022 spin_unlock_bh(&adapter->mcc_lock); 2023 2024 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) 2025 return -EOPNOTSUPP; 2026 2027 return status; 2028 } 2029 2030 /* Uses sycn mcc */ 2031 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) 2032 { 2033 struct be_mcc_wrb *wrb; 2034 struct be_cmd_req_get_flow_control *req; 2035 int status; 2036 2037 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL, 2038 CMD_SUBSYSTEM_COMMON)) 2039 return -EPERM; 2040 2041 spin_lock_bh(&adapter->mcc_lock); 2042 2043 wrb = wrb_from_mccq(adapter); 2044 if (!wrb) { 2045 status = -EBUSY; 2046 goto err; 2047 } 2048 req = embedded_payload(wrb); 2049 2050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2051 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), 2052 wrb, NULL); 2053 2054 status = be_mcc_notify_wait(adapter); 2055 if (!status) { 2056 struct be_cmd_resp_get_flow_control *resp = 2057 embedded_payload(wrb); 2058 2059 *tx_fc = le16_to_cpu(resp->tx_flow_control); 2060 *rx_fc = le16_to_cpu(resp->rx_flow_control); 2061 } 2062 2063 err: 2064 spin_unlock_bh(&adapter->mcc_lock); 2065 return status; 2066 } 2067 2068 /* Uses mbox */ 2069 int be_cmd_query_fw_cfg(struct be_adapter *adapter) 2070 { 2071 struct be_mcc_wrb *wrb; 2072 struct be_cmd_req_query_fw_cfg *req; 2073 int status; 2074 2075 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2076 return -1; 2077 2078 wrb = wrb_from_mbox(adapter); 2079 req = embedded_payload(wrb); 2080 2081 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2082 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, 2083 sizeof(*req), wrb, NULL); 2084 2085 status = be_mbox_notify_wait(adapter); 2086 if (!status) { 2087 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 2088 2089 adapter->port_num = le32_to_cpu(resp->phys_port); 2090 adapter->function_mode = le32_to_cpu(resp->function_mode); 2091 adapter->function_caps = le32_to_cpu(resp->function_caps); 2092 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; 2093 dev_info(&adapter->pdev->dev, 2094 "FW config: function_mode=0x%x, function_caps=0x%x\n", 2095 adapter->function_mode, adapter->function_caps); 2096 } 2097 2098 mutex_unlock(&adapter->mbox_lock); 2099 return status; 2100 } 2101 2102 /* Uses mbox */ 2103 int be_cmd_reset_function(struct be_adapter *adapter) 2104 { 2105 struct be_mcc_wrb *wrb; 2106 struct be_cmd_req_hdr *req; 2107 int status; 2108 2109 if (lancer_chip(adapter)) { 2110 iowrite32(SLI_PORT_CONTROL_IP_MASK, 2111 adapter->db + SLIPORT_CONTROL_OFFSET); 2112 status = lancer_wait_ready(adapter); 2113 if (status) 2114 dev_err(&adapter->pdev->dev, 2115 "Adapter in non recoverable error\n"); 2116 return status; 2117 } 2118 2119 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2120 return -1; 2121 2122 wrb = wrb_from_mbox(adapter); 2123 req = embedded_payload(wrb); 2124 2125 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2126 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, 2127 NULL); 2128 2129 status = be_mbox_notify_wait(adapter); 2130 2131 mutex_unlock(&adapter->mbox_lock); 2132 return status; 2133 } 2134 2135 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2136 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey) 2137 { 2138 struct be_mcc_wrb *wrb; 2139 struct be_cmd_req_rss_config *req; 2140 int status; 2141 2142 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2143 return 0; 2144 2145 spin_lock_bh(&adapter->mcc_lock); 2146 2147 wrb = wrb_from_mccq(adapter); 2148 if (!wrb) { 2149 status = -EBUSY; 2150 goto err; 2151 } 2152 req = embedded_payload(wrb); 2153 2154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2155 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2156 2157 req->if_id = cpu_to_le32(adapter->if_handle); 2158 req->enable_rss = cpu_to_le16(rss_hash_opts); 2159 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2160 2161 if (!BEx_chip(adapter)) 2162 req->hdr.version = 1; 2163 2164 memcpy(req->cpu_table, rsstable, table_size); 2165 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); 2166 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2167 2168 status = be_mcc_notify_wait(adapter); 2169 err: 2170 spin_unlock_bh(&adapter->mcc_lock); 2171 return status; 2172 } 2173 2174 /* Uses sync mcc */ 2175 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2176 u8 bcn, u8 sts, u8 state) 2177 { 2178 struct be_mcc_wrb *wrb; 2179 struct be_cmd_req_enable_disable_beacon *req; 2180 int status; 2181 2182 spin_lock_bh(&adapter->mcc_lock); 2183 2184 wrb = wrb_from_mccq(adapter); 2185 if (!wrb) { 2186 status = -EBUSY; 2187 goto err; 2188 } 2189 req = embedded_payload(wrb); 2190 2191 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2192 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2193 sizeof(*req), wrb, NULL); 2194 2195 req->port_num = port_num; 2196 req->beacon_state = state; 2197 req->beacon_duration = bcn; 2198 req->status_duration = sts; 2199 2200 status = be_mcc_notify_wait(adapter); 2201 2202 err: 2203 spin_unlock_bh(&adapter->mcc_lock); 2204 return status; 2205 } 2206 2207 /* Uses sync mcc */ 2208 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2209 { 2210 struct be_mcc_wrb *wrb; 2211 struct be_cmd_req_get_beacon_state *req; 2212 int status; 2213 2214 spin_lock_bh(&adapter->mcc_lock); 2215 2216 wrb = wrb_from_mccq(adapter); 2217 if (!wrb) { 2218 status = -EBUSY; 2219 goto err; 2220 } 2221 req = embedded_payload(wrb); 2222 2223 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2224 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2225 wrb, NULL); 2226 2227 req->port_num = port_num; 2228 2229 status = be_mcc_notify_wait(adapter); 2230 if (!status) { 2231 struct be_cmd_resp_get_beacon_state *resp = 2232 embedded_payload(wrb); 2233 2234 *state = resp->beacon_state; 2235 } 2236 2237 err: 2238 spin_unlock_bh(&adapter->mcc_lock); 2239 return status; 2240 } 2241 2242 /* Uses sync mcc */ 2243 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, 2244 u8 page_num, u8 *data) 2245 { 2246 struct be_dma_mem cmd; 2247 struct be_mcc_wrb *wrb; 2248 struct be_cmd_req_port_type *req; 2249 int status; 2250 2251 if (page_num > TR_PAGE_A2) 2252 return -EINVAL; 2253 2254 cmd.size = sizeof(struct be_cmd_resp_port_type); 2255 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2256 GFP_ATOMIC); 2257 if (!cmd.va) { 2258 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2259 return -ENOMEM; 2260 } 2261 2262 spin_lock_bh(&adapter->mcc_lock); 2263 2264 wrb = wrb_from_mccq(adapter); 2265 if (!wrb) { 2266 status = -EBUSY; 2267 goto err; 2268 } 2269 req = cmd.va; 2270 2271 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2272 OPCODE_COMMON_READ_TRANSRECV_DATA, 2273 cmd.size, wrb, &cmd); 2274 2275 req->port = cpu_to_le32(adapter->hba_port_num); 2276 req->page_num = cpu_to_le32(page_num); 2277 status = be_mcc_notify_wait(adapter); 2278 if (!status) { 2279 struct be_cmd_resp_port_type *resp = cmd.va; 2280 2281 memcpy(data, resp->page_data, PAGE_DATA_LEN); 2282 } 2283 err: 2284 spin_unlock_bh(&adapter->mcc_lock); 2285 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2286 return status; 2287 } 2288 2289 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2290 u32 data_size, u32 data_offset, 2291 const char *obj_name, u32 *data_written, 2292 u8 *change_status, u8 *addn_status) 2293 { 2294 struct be_mcc_wrb *wrb; 2295 struct lancer_cmd_req_write_object *req; 2296 struct lancer_cmd_resp_write_object *resp; 2297 void *ctxt = NULL; 2298 int status; 2299 2300 spin_lock_bh(&adapter->mcc_lock); 2301 adapter->flash_status = 0; 2302 2303 wrb = wrb_from_mccq(adapter); 2304 if (!wrb) { 2305 status = -EBUSY; 2306 goto err_unlock; 2307 } 2308 2309 req = embedded_payload(wrb); 2310 2311 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2312 OPCODE_COMMON_WRITE_OBJECT, 2313 sizeof(struct lancer_cmd_req_write_object), wrb, 2314 NULL); 2315 2316 ctxt = &req->context; 2317 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2318 write_length, ctxt, data_size); 2319 2320 if (data_size == 0) 2321 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2322 eof, ctxt, 1); 2323 else 2324 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2325 eof, ctxt, 0); 2326 2327 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2328 req->write_offset = cpu_to_le32(data_offset); 2329 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2330 req->descriptor_count = cpu_to_le32(1); 2331 req->buf_len = cpu_to_le32(data_size); 2332 req->addr_low = cpu_to_le32((cmd->dma + 2333 sizeof(struct lancer_cmd_req_write_object)) 2334 & 0xFFFFFFFF); 2335 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2336 sizeof(struct lancer_cmd_req_write_object))); 2337 2338 status = be_mcc_notify(adapter); 2339 if (status) 2340 goto err_unlock; 2341 2342 spin_unlock_bh(&adapter->mcc_lock); 2343 2344 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2345 msecs_to_jiffies(60000))) 2346 status = -ETIMEDOUT; 2347 else 2348 status = adapter->flash_status; 2349 2350 resp = embedded_payload(wrb); 2351 if (!status) { 2352 *data_written = le32_to_cpu(resp->actual_write_len); 2353 *change_status = resp->change_status; 2354 } else { 2355 *addn_status = resp->additional_status; 2356 } 2357 2358 return status; 2359 2360 err_unlock: 2361 spin_unlock_bh(&adapter->mcc_lock); 2362 return status; 2363 } 2364 2365 int be_cmd_query_cable_type(struct be_adapter *adapter) 2366 { 2367 u8 page_data[PAGE_DATA_LEN]; 2368 int status; 2369 2370 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2371 page_data); 2372 if (!status) { 2373 switch (adapter->phy.interface_type) { 2374 case PHY_TYPE_QSFP: 2375 adapter->phy.cable_type = 2376 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; 2377 break; 2378 case PHY_TYPE_SFP_PLUS_10GB: 2379 adapter->phy.cable_type = 2380 page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; 2381 break; 2382 default: 2383 adapter->phy.cable_type = 0; 2384 break; 2385 } 2386 } 2387 return status; 2388 } 2389 2390 int be_cmd_query_sfp_info(struct be_adapter *adapter) 2391 { 2392 u8 page_data[PAGE_DATA_LEN]; 2393 int status; 2394 2395 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2396 page_data); 2397 if (!status) { 2398 strlcpy(adapter->phy.vendor_name, page_data + 2399 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); 2400 strlcpy(adapter->phy.vendor_pn, 2401 page_data + SFP_VENDOR_PN_OFFSET, 2402 SFP_VENDOR_NAME_LEN - 1); 2403 } 2404 2405 return status; 2406 } 2407 2408 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) 2409 { 2410 struct lancer_cmd_req_delete_object *req; 2411 struct be_mcc_wrb *wrb; 2412 int status; 2413 2414 spin_lock_bh(&adapter->mcc_lock); 2415 2416 wrb = wrb_from_mccq(adapter); 2417 if (!wrb) { 2418 status = -EBUSY; 2419 goto err; 2420 } 2421 2422 req = embedded_payload(wrb); 2423 2424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2425 OPCODE_COMMON_DELETE_OBJECT, 2426 sizeof(*req), wrb, NULL); 2427 2428 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); 2429 2430 status = be_mcc_notify_wait(adapter); 2431 err: 2432 spin_unlock_bh(&adapter->mcc_lock); 2433 return status; 2434 } 2435 2436 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2437 u32 data_size, u32 data_offset, const char *obj_name, 2438 u32 *data_read, u32 *eof, u8 *addn_status) 2439 { 2440 struct be_mcc_wrb *wrb; 2441 struct lancer_cmd_req_read_object *req; 2442 struct lancer_cmd_resp_read_object *resp; 2443 int status; 2444 2445 spin_lock_bh(&adapter->mcc_lock); 2446 2447 wrb = wrb_from_mccq(adapter); 2448 if (!wrb) { 2449 status = -EBUSY; 2450 goto err_unlock; 2451 } 2452 2453 req = embedded_payload(wrb); 2454 2455 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2456 OPCODE_COMMON_READ_OBJECT, 2457 sizeof(struct lancer_cmd_req_read_object), wrb, 2458 NULL); 2459 2460 req->desired_read_len = cpu_to_le32(data_size); 2461 req->read_offset = cpu_to_le32(data_offset); 2462 strcpy(req->object_name, obj_name); 2463 req->descriptor_count = cpu_to_le32(1); 2464 req->buf_len = cpu_to_le32(data_size); 2465 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2466 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2467 2468 status = be_mcc_notify_wait(adapter); 2469 2470 resp = embedded_payload(wrb); 2471 if (!status) { 2472 *data_read = le32_to_cpu(resp->actual_read_len); 2473 *eof = le32_to_cpu(resp->eof); 2474 } else { 2475 *addn_status = resp->additional_status; 2476 } 2477 2478 err_unlock: 2479 spin_unlock_bh(&adapter->mcc_lock); 2480 return status; 2481 } 2482 2483 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2484 u32 flash_type, u32 flash_opcode, u32 img_offset, 2485 u32 buf_size) 2486 { 2487 struct be_mcc_wrb *wrb; 2488 struct be_cmd_write_flashrom *req; 2489 int status; 2490 2491 spin_lock_bh(&adapter->mcc_lock); 2492 adapter->flash_status = 0; 2493 2494 wrb = wrb_from_mccq(adapter); 2495 if (!wrb) { 2496 status = -EBUSY; 2497 goto err_unlock; 2498 } 2499 req = cmd->va; 2500 2501 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2502 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2503 cmd); 2504 2505 req->params.op_type = cpu_to_le32(flash_type); 2506 if (flash_type == OPTYPE_OFFSET_SPECIFIED) 2507 req->params.offset = cpu_to_le32(img_offset); 2508 2509 req->params.op_code = cpu_to_le32(flash_opcode); 2510 req->params.data_buf_size = cpu_to_le32(buf_size); 2511 2512 status = be_mcc_notify(adapter); 2513 if (status) 2514 goto err_unlock; 2515 2516 spin_unlock_bh(&adapter->mcc_lock); 2517 2518 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2519 msecs_to_jiffies(40000))) 2520 status = -ETIMEDOUT; 2521 else 2522 status = adapter->flash_status; 2523 2524 return status; 2525 2526 err_unlock: 2527 spin_unlock_bh(&adapter->mcc_lock); 2528 return status; 2529 } 2530 2531 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2532 u16 img_optype, u32 img_offset, u32 crc_offset) 2533 { 2534 struct be_cmd_read_flash_crc *req; 2535 struct be_mcc_wrb *wrb; 2536 int status; 2537 2538 spin_lock_bh(&adapter->mcc_lock); 2539 2540 wrb = wrb_from_mccq(adapter); 2541 if (!wrb) { 2542 status = -EBUSY; 2543 goto err; 2544 } 2545 req = embedded_payload(wrb); 2546 2547 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2548 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2549 wrb, NULL); 2550 2551 req->params.op_type = cpu_to_le32(img_optype); 2552 if (img_optype == OPTYPE_OFFSET_SPECIFIED) 2553 req->params.offset = cpu_to_le32(img_offset + crc_offset); 2554 else 2555 req->params.offset = cpu_to_le32(crc_offset); 2556 2557 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2558 req->params.data_buf_size = cpu_to_le32(0x4); 2559 2560 status = be_mcc_notify_wait(adapter); 2561 if (!status) 2562 memcpy(flashed_crc, req->crc, 4); 2563 2564 err: 2565 spin_unlock_bh(&adapter->mcc_lock); 2566 return status; 2567 } 2568 2569 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2570 struct be_dma_mem *nonemb_cmd) 2571 { 2572 struct be_mcc_wrb *wrb; 2573 struct be_cmd_req_acpi_wol_magic_config *req; 2574 int status; 2575 2576 spin_lock_bh(&adapter->mcc_lock); 2577 2578 wrb = wrb_from_mccq(adapter); 2579 if (!wrb) { 2580 status = -EBUSY; 2581 goto err; 2582 } 2583 req = nonemb_cmd->va; 2584 2585 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2586 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2587 wrb, nonemb_cmd); 2588 memcpy(req->magic_mac, mac, ETH_ALEN); 2589 2590 status = be_mcc_notify_wait(adapter); 2591 2592 err: 2593 spin_unlock_bh(&adapter->mcc_lock); 2594 return status; 2595 } 2596 2597 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2598 u8 loopback_type, u8 enable) 2599 { 2600 struct be_mcc_wrb *wrb; 2601 struct be_cmd_req_set_lmode *req; 2602 int status; 2603 2604 spin_lock_bh(&adapter->mcc_lock); 2605 2606 wrb = wrb_from_mccq(adapter); 2607 if (!wrb) { 2608 status = -EBUSY; 2609 goto err_unlock; 2610 } 2611 2612 req = embedded_payload(wrb); 2613 2614 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2615 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2616 wrb, NULL); 2617 2618 req->src_port = port_num; 2619 req->dest_port = port_num; 2620 req->loopback_type = loopback_type; 2621 req->loopback_state = enable; 2622 2623 status = be_mcc_notify(adapter); 2624 if (status) 2625 goto err_unlock; 2626 2627 spin_unlock_bh(&adapter->mcc_lock); 2628 2629 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2630 msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) 2631 status = -ETIMEDOUT; 2632 2633 return status; 2634 2635 err_unlock: 2636 spin_unlock_bh(&adapter->mcc_lock); 2637 return status; 2638 } 2639 2640 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2641 u32 loopback_type, u32 pkt_size, u32 num_pkts, 2642 u64 pattern) 2643 { 2644 struct be_mcc_wrb *wrb; 2645 struct be_cmd_req_loopback_test *req; 2646 struct be_cmd_resp_loopback_test *resp; 2647 int status; 2648 2649 spin_lock_bh(&adapter->mcc_lock); 2650 2651 wrb = wrb_from_mccq(adapter); 2652 if (!wrb) { 2653 status = -EBUSY; 2654 goto err; 2655 } 2656 2657 req = embedded_payload(wrb); 2658 2659 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2660 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2661 NULL); 2662 2663 req->hdr.timeout = cpu_to_le32(15); 2664 req->pattern = cpu_to_le64(pattern); 2665 req->src_port = cpu_to_le32(port_num); 2666 req->dest_port = cpu_to_le32(port_num); 2667 req->pkt_size = cpu_to_le32(pkt_size); 2668 req->num_pkts = cpu_to_le32(num_pkts); 2669 req->loopback_type = cpu_to_le32(loopback_type); 2670 2671 status = be_mcc_notify(adapter); 2672 if (status) 2673 goto err; 2674 2675 spin_unlock_bh(&adapter->mcc_lock); 2676 2677 wait_for_completion(&adapter->et_cmd_compl); 2678 resp = embedded_payload(wrb); 2679 status = le32_to_cpu(resp->status); 2680 2681 return status; 2682 err: 2683 spin_unlock_bh(&adapter->mcc_lock); 2684 return status; 2685 } 2686 2687 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2688 u32 byte_cnt, struct be_dma_mem *cmd) 2689 { 2690 struct be_mcc_wrb *wrb; 2691 struct be_cmd_req_ddrdma_test *req; 2692 int status; 2693 int i, j = 0; 2694 2695 spin_lock_bh(&adapter->mcc_lock); 2696 2697 wrb = wrb_from_mccq(adapter); 2698 if (!wrb) { 2699 status = -EBUSY; 2700 goto err; 2701 } 2702 req = cmd->va; 2703 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2704 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2705 cmd); 2706 2707 req->pattern = cpu_to_le64(pattern); 2708 req->byte_count = cpu_to_le32(byte_cnt); 2709 for (i = 0; i < byte_cnt; i++) { 2710 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2711 j++; 2712 if (j > 7) 2713 j = 0; 2714 } 2715 2716 status = be_mcc_notify_wait(adapter); 2717 2718 if (!status) { 2719 struct be_cmd_resp_ddrdma_test *resp; 2720 2721 resp = cmd->va; 2722 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2723 resp->snd_err) { 2724 status = -1; 2725 } 2726 } 2727 2728 err: 2729 spin_unlock_bh(&adapter->mcc_lock); 2730 return status; 2731 } 2732 2733 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2734 struct be_dma_mem *nonemb_cmd) 2735 { 2736 struct be_mcc_wrb *wrb; 2737 struct be_cmd_req_seeprom_read *req; 2738 int status; 2739 2740 spin_lock_bh(&adapter->mcc_lock); 2741 2742 wrb = wrb_from_mccq(adapter); 2743 if (!wrb) { 2744 status = -EBUSY; 2745 goto err; 2746 } 2747 req = nonemb_cmd->va; 2748 2749 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2750 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2751 nonemb_cmd); 2752 2753 status = be_mcc_notify_wait(adapter); 2754 2755 err: 2756 spin_unlock_bh(&adapter->mcc_lock); 2757 return status; 2758 } 2759 2760 int be_cmd_get_phy_info(struct be_adapter *adapter) 2761 { 2762 struct be_mcc_wrb *wrb; 2763 struct be_cmd_req_get_phy_info *req; 2764 struct be_dma_mem cmd; 2765 int status; 2766 2767 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 2768 CMD_SUBSYSTEM_COMMON)) 2769 return -EPERM; 2770 2771 spin_lock_bh(&adapter->mcc_lock); 2772 2773 wrb = wrb_from_mccq(adapter); 2774 if (!wrb) { 2775 status = -EBUSY; 2776 goto err; 2777 } 2778 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2779 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2780 GFP_ATOMIC); 2781 if (!cmd.va) { 2782 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2783 status = -ENOMEM; 2784 goto err; 2785 } 2786 2787 req = cmd.va; 2788 2789 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2790 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2791 wrb, &cmd); 2792 2793 status = be_mcc_notify_wait(adapter); 2794 if (!status) { 2795 struct be_phy_info *resp_phy_info = 2796 cmd.va + sizeof(struct be_cmd_req_hdr); 2797 2798 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2799 adapter->phy.interface_type = 2800 le16_to_cpu(resp_phy_info->interface_type); 2801 adapter->phy.auto_speeds_supported = 2802 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2803 adapter->phy.fixed_speeds_supported = 2804 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2805 adapter->phy.misc_params = 2806 le32_to_cpu(resp_phy_info->misc_params); 2807 2808 if (BE2_chip(adapter)) { 2809 adapter->phy.fixed_speeds_supported = 2810 BE_SUPPORTED_SPEED_10GBPS | 2811 BE_SUPPORTED_SPEED_1GBPS; 2812 } 2813 } 2814 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2815 err: 2816 spin_unlock_bh(&adapter->mcc_lock); 2817 return status; 2818 } 2819 2820 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2821 { 2822 struct be_mcc_wrb *wrb; 2823 struct be_cmd_req_set_qos *req; 2824 int status; 2825 2826 spin_lock_bh(&adapter->mcc_lock); 2827 2828 wrb = wrb_from_mccq(adapter); 2829 if (!wrb) { 2830 status = -EBUSY; 2831 goto err; 2832 } 2833 2834 req = embedded_payload(wrb); 2835 2836 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2837 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2838 2839 req->hdr.domain = domain; 2840 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2841 req->max_bps_nic = cpu_to_le32(bps); 2842 2843 status = be_mcc_notify_wait(adapter); 2844 2845 err: 2846 spin_unlock_bh(&adapter->mcc_lock); 2847 return status; 2848 } 2849 2850 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2851 { 2852 struct be_mcc_wrb *wrb; 2853 struct be_cmd_req_cntl_attribs *req; 2854 struct be_cmd_resp_cntl_attribs *resp; 2855 int status, i; 2856 int payload_len = max(sizeof(*req), sizeof(*resp)); 2857 struct mgmt_controller_attrib *attribs; 2858 struct be_dma_mem attribs_cmd; 2859 u32 *serial_num; 2860 2861 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2862 return -1; 2863 2864 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2865 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2866 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 2867 attribs_cmd.size, 2868 &attribs_cmd.dma, GFP_ATOMIC); 2869 if (!attribs_cmd.va) { 2870 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2871 status = -ENOMEM; 2872 goto err; 2873 } 2874 2875 wrb = wrb_from_mbox(adapter); 2876 if (!wrb) { 2877 status = -EBUSY; 2878 goto err; 2879 } 2880 req = attribs_cmd.va; 2881 2882 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2883 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2884 wrb, &attribs_cmd); 2885 2886 status = be_mbox_notify_wait(adapter); 2887 if (!status) { 2888 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2889 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2890 serial_num = attribs->hba_attribs.controller_serial_number; 2891 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) 2892 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & 2893 (BIT_MASK(16) - 1); 2894 } 2895 2896 err: 2897 mutex_unlock(&adapter->mbox_lock); 2898 if (attribs_cmd.va) 2899 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, 2900 attribs_cmd.va, attribs_cmd.dma); 2901 return status; 2902 } 2903 2904 /* Uses mbox */ 2905 int be_cmd_req_native_mode(struct be_adapter *adapter) 2906 { 2907 struct be_mcc_wrb *wrb; 2908 struct be_cmd_req_set_func_cap *req; 2909 int status; 2910 2911 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2912 return -1; 2913 2914 wrb = wrb_from_mbox(adapter); 2915 if (!wrb) { 2916 status = -EBUSY; 2917 goto err; 2918 } 2919 2920 req = embedded_payload(wrb); 2921 2922 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2923 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2924 sizeof(*req), wrb, NULL); 2925 2926 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2927 CAPABILITY_BE3_NATIVE_ERX_API); 2928 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2929 2930 status = be_mbox_notify_wait(adapter); 2931 if (!status) { 2932 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2933 2934 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2935 CAPABILITY_BE3_NATIVE_ERX_API; 2936 if (!adapter->be3_native) 2937 dev_warn(&adapter->pdev->dev, 2938 "adapter not in advanced mode\n"); 2939 } 2940 err: 2941 mutex_unlock(&adapter->mbox_lock); 2942 return status; 2943 } 2944 2945 /* Get privilege(s) for a function */ 2946 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2947 u32 domain) 2948 { 2949 struct be_mcc_wrb *wrb; 2950 struct be_cmd_req_get_fn_privileges *req; 2951 int status; 2952 2953 spin_lock_bh(&adapter->mcc_lock); 2954 2955 wrb = wrb_from_mccq(adapter); 2956 if (!wrb) { 2957 status = -EBUSY; 2958 goto err; 2959 } 2960 2961 req = embedded_payload(wrb); 2962 2963 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2964 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 2965 wrb, NULL); 2966 2967 req->hdr.domain = domain; 2968 2969 status = be_mcc_notify_wait(adapter); 2970 if (!status) { 2971 struct be_cmd_resp_get_fn_privileges *resp = 2972 embedded_payload(wrb); 2973 2974 *privilege = le32_to_cpu(resp->privilege_mask); 2975 2976 /* In UMC mode FW does not return right privileges. 2977 * Override with correct privilege equivalent to PF. 2978 */ 2979 if (BEx_chip(adapter) && be_is_mc(adapter) && 2980 be_physfn(adapter)) 2981 *privilege = MAX_PRIVILEGES; 2982 } 2983 2984 err: 2985 spin_unlock_bh(&adapter->mcc_lock); 2986 return status; 2987 } 2988 2989 /* Set privilege(s) for a function */ 2990 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2991 u32 domain) 2992 { 2993 struct be_mcc_wrb *wrb; 2994 struct be_cmd_req_set_fn_privileges *req; 2995 int status; 2996 2997 spin_lock_bh(&adapter->mcc_lock); 2998 2999 wrb = wrb_from_mccq(adapter); 3000 if (!wrb) { 3001 status = -EBUSY; 3002 goto err; 3003 } 3004 3005 req = embedded_payload(wrb); 3006 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3007 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 3008 wrb, NULL); 3009 req->hdr.domain = domain; 3010 if (lancer_chip(adapter)) 3011 req->privileges_lancer = cpu_to_le32(privileges); 3012 else 3013 req->privileges = cpu_to_le32(privileges); 3014 3015 status = be_mcc_notify_wait(adapter); 3016 err: 3017 spin_unlock_bh(&adapter->mcc_lock); 3018 return status; 3019 } 3020 3021 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 3022 * pmac_id_valid: false => pmac_id or MAC address is requested. 3023 * If pmac_id is returned, pmac_id_valid is returned as true 3024 */ 3025 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 3026 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 3027 u8 domain) 3028 { 3029 struct be_mcc_wrb *wrb; 3030 struct be_cmd_req_get_mac_list *req; 3031 int status; 3032 int mac_count; 3033 struct be_dma_mem get_mac_list_cmd; 3034 int i; 3035 3036 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3037 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3038 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3039 get_mac_list_cmd.size, 3040 &get_mac_list_cmd.dma, 3041 GFP_ATOMIC); 3042 3043 if (!get_mac_list_cmd.va) { 3044 dev_err(&adapter->pdev->dev, 3045 "Memory allocation failure during GET_MAC_LIST\n"); 3046 return -ENOMEM; 3047 } 3048 3049 spin_lock_bh(&adapter->mcc_lock); 3050 3051 wrb = wrb_from_mccq(adapter); 3052 if (!wrb) { 3053 status = -EBUSY; 3054 goto out; 3055 } 3056 3057 req = get_mac_list_cmd.va; 3058 3059 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3060 OPCODE_COMMON_GET_MAC_LIST, 3061 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 3062 req->hdr.domain = domain; 3063 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 3064 if (*pmac_id_valid) { 3065 req->mac_id = cpu_to_le32(*pmac_id); 3066 req->iface_id = cpu_to_le16(if_handle); 3067 req->perm_override = 0; 3068 } else { 3069 req->perm_override = 1; 3070 } 3071 3072 status = be_mcc_notify_wait(adapter); 3073 if (!status) { 3074 struct be_cmd_resp_get_mac_list *resp = 3075 get_mac_list_cmd.va; 3076 3077 if (*pmac_id_valid) { 3078 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 3079 ETH_ALEN); 3080 goto out; 3081 } 3082 3083 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 3084 /* Mac list returned could contain one or more active mac_ids 3085 * or one or more true or pseudo permanent mac addresses. 3086 * If an active mac_id is present, return first active mac_id 3087 * found. 3088 */ 3089 for (i = 0; i < mac_count; i++) { 3090 struct get_list_macaddr *mac_entry; 3091 u16 mac_addr_size; 3092 u32 mac_id; 3093 3094 mac_entry = &resp->macaddr_list[i]; 3095 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 3096 /* mac_id is a 32 bit value and mac_addr size 3097 * is 6 bytes 3098 */ 3099 if (mac_addr_size == sizeof(u32)) { 3100 *pmac_id_valid = true; 3101 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 3102 *pmac_id = le32_to_cpu(mac_id); 3103 goto out; 3104 } 3105 } 3106 /* If no active mac_id found, return first mac addr */ 3107 *pmac_id_valid = false; 3108 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 3109 ETH_ALEN); 3110 } 3111 3112 out: 3113 spin_unlock_bh(&adapter->mcc_lock); 3114 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, 3115 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3116 return status; 3117 } 3118 3119 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 3120 u8 *mac, u32 if_handle, bool active, u32 domain) 3121 { 3122 if (!active) 3123 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 3124 if_handle, domain); 3125 if (BEx_chip(adapter)) 3126 return be_cmd_mac_addr_query(adapter, mac, false, 3127 if_handle, curr_pmac_id); 3128 else 3129 /* Fetch the MAC address using pmac_id */ 3130 return be_cmd_get_mac_from_list(adapter, mac, &active, 3131 &curr_pmac_id, 3132 if_handle, domain); 3133 } 3134 3135 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 3136 { 3137 int status; 3138 bool pmac_valid = false; 3139 3140 eth_zero_addr(mac); 3141 3142 if (BEx_chip(adapter)) { 3143 if (be_physfn(adapter)) 3144 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 3145 0); 3146 else 3147 status = be_cmd_mac_addr_query(adapter, mac, false, 3148 adapter->if_handle, 0); 3149 } else { 3150 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 3151 NULL, adapter->if_handle, 0); 3152 } 3153 3154 return status; 3155 } 3156 3157 /* Uses synchronous MCCQ */ 3158 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 3159 u8 mac_count, u32 domain) 3160 { 3161 struct be_mcc_wrb *wrb; 3162 struct be_cmd_req_set_mac_list *req; 3163 int status; 3164 struct be_dma_mem cmd; 3165 3166 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3167 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3168 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3169 GFP_KERNEL); 3170 if (!cmd.va) 3171 return -ENOMEM; 3172 3173 spin_lock_bh(&adapter->mcc_lock); 3174 3175 wrb = wrb_from_mccq(adapter); 3176 if (!wrb) { 3177 status = -EBUSY; 3178 goto err; 3179 } 3180 3181 req = cmd.va; 3182 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3183 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 3184 wrb, &cmd); 3185 3186 req->hdr.domain = domain; 3187 req->mac_count = mac_count; 3188 if (mac_count) 3189 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 3190 3191 status = be_mcc_notify_wait(adapter); 3192 3193 err: 3194 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3195 spin_unlock_bh(&adapter->mcc_lock); 3196 return status; 3197 } 3198 3199 /* Wrapper to delete any active MACs and provision the new mac. 3200 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 3201 * current list are active. 3202 */ 3203 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 3204 { 3205 bool active_mac = false; 3206 u8 old_mac[ETH_ALEN]; 3207 u32 pmac_id; 3208 int status; 3209 3210 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 3211 &pmac_id, if_id, dom); 3212 3213 if (!status && active_mac) 3214 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 3215 3216 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 3217 } 3218 3219 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 3220 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk) 3221 { 3222 struct be_mcc_wrb *wrb; 3223 struct be_cmd_req_set_hsw_config *req; 3224 void *ctxt; 3225 int status; 3226 3227 spin_lock_bh(&adapter->mcc_lock); 3228 3229 wrb = wrb_from_mccq(adapter); 3230 if (!wrb) { 3231 status = -EBUSY; 3232 goto err; 3233 } 3234 3235 req = embedded_payload(wrb); 3236 ctxt = &req->context; 3237 3238 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3239 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3240 NULL); 3241 3242 req->hdr.domain = domain; 3243 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3244 if (pvid) { 3245 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3246 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3247 } 3248 if (!BEx_chip(adapter) && hsw_mode) { 3249 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3250 ctxt, adapter->hba_port_num); 3251 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3252 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3253 ctxt, hsw_mode); 3254 } 3255 3256 /* Enable/disable both mac and vlan spoof checking */ 3257 if (!BEx_chip(adapter) && spoofchk) { 3258 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk, 3259 ctxt, spoofchk); 3260 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk, 3261 ctxt, spoofchk); 3262 } 3263 3264 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3265 status = be_mcc_notify_wait(adapter); 3266 3267 err: 3268 spin_unlock_bh(&adapter->mcc_lock); 3269 return status; 3270 } 3271 3272 /* Get Hyper switch config */ 3273 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3274 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk) 3275 { 3276 struct be_mcc_wrb *wrb; 3277 struct be_cmd_req_get_hsw_config *req; 3278 void *ctxt; 3279 int status; 3280 u16 vid; 3281 3282 spin_lock_bh(&adapter->mcc_lock); 3283 3284 wrb = wrb_from_mccq(adapter); 3285 if (!wrb) { 3286 status = -EBUSY; 3287 goto err; 3288 } 3289 3290 req = embedded_payload(wrb); 3291 ctxt = &req->context; 3292 3293 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3294 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3295 NULL); 3296 3297 req->hdr.domain = domain; 3298 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3299 ctxt, intf_id); 3300 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3301 3302 if (!BEx_chip(adapter) && mode) { 3303 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3304 ctxt, adapter->hba_port_num); 3305 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3306 } 3307 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3308 3309 status = be_mcc_notify_wait(adapter); 3310 if (!status) { 3311 struct be_cmd_resp_get_hsw_config *resp = 3312 embedded_payload(wrb); 3313 3314 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3315 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3316 pvid, &resp->context); 3317 if (pvid) 3318 *pvid = le16_to_cpu(vid); 3319 if (mode) 3320 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3321 port_fwd_type, &resp->context); 3322 if (spoofchk) 3323 *spoofchk = 3324 AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3325 spoofchk, &resp->context); 3326 } 3327 3328 err: 3329 spin_unlock_bh(&adapter->mcc_lock); 3330 return status; 3331 } 3332 3333 static bool be_is_wol_excluded(struct be_adapter *adapter) 3334 { 3335 struct pci_dev *pdev = adapter->pdev; 3336 3337 if (be_virtfn(adapter)) 3338 return true; 3339 3340 switch (pdev->subsystem_device) { 3341 case OC_SUBSYS_DEVICE_ID1: 3342 case OC_SUBSYS_DEVICE_ID2: 3343 case OC_SUBSYS_DEVICE_ID3: 3344 case OC_SUBSYS_DEVICE_ID4: 3345 return true; 3346 default: 3347 return false; 3348 } 3349 } 3350 3351 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3352 { 3353 struct be_mcc_wrb *wrb; 3354 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 3355 int status = 0; 3356 struct be_dma_mem cmd; 3357 3358 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3359 CMD_SUBSYSTEM_ETH)) 3360 return -EPERM; 3361 3362 if (be_is_wol_excluded(adapter)) 3363 return status; 3364 3365 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3366 return -1; 3367 3368 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3369 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3370 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3371 GFP_ATOMIC); 3372 if (!cmd.va) { 3373 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3374 status = -ENOMEM; 3375 goto err; 3376 } 3377 3378 wrb = wrb_from_mbox(adapter); 3379 if (!wrb) { 3380 status = -EBUSY; 3381 goto err; 3382 } 3383 3384 req = cmd.va; 3385 3386 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3387 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 3388 sizeof(*req), wrb, &cmd); 3389 3390 req->hdr.version = 1; 3391 req->query_options = BE_GET_WOL_CAP; 3392 3393 status = be_mbox_notify_wait(adapter); 3394 if (!status) { 3395 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 3396 3397 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; 3398 3399 adapter->wol_cap = resp->wol_settings; 3400 if (adapter->wol_cap & BE_WOL_CAP) 3401 adapter->wol_en = true; 3402 } 3403 err: 3404 mutex_unlock(&adapter->mbox_lock); 3405 if (cmd.va) 3406 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 3407 cmd.dma); 3408 return status; 3409 3410 } 3411 3412 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 3413 { 3414 struct be_dma_mem extfat_cmd; 3415 struct be_fat_conf_params *cfgs; 3416 int status; 3417 int i, j; 3418 3419 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3420 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3421 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3422 extfat_cmd.size, &extfat_cmd.dma, 3423 GFP_ATOMIC); 3424 if (!extfat_cmd.va) 3425 return -ENOMEM; 3426 3427 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3428 if (status) 3429 goto err; 3430 3431 cfgs = (struct be_fat_conf_params *) 3432 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 3433 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 3434 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 3435 3436 for (j = 0; j < num_modes; j++) { 3437 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 3438 cfgs->module[i].trace_lvl[j].dbg_lvl = 3439 cpu_to_le32(level); 3440 } 3441 } 3442 3443 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3444 err: 3445 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, 3446 extfat_cmd.dma); 3447 return status; 3448 } 3449 3450 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 3451 { 3452 struct be_dma_mem extfat_cmd; 3453 struct be_fat_conf_params *cfgs; 3454 int status, j; 3455 int level = 0; 3456 3457 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3458 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3459 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3460 extfat_cmd.size, &extfat_cmd.dma, 3461 GFP_ATOMIC); 3462 3463 if (!extfat_cmd.va) { 3464 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3465 __func__); 3466 goto err; 3467 } 3468 3469 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 3470 if (!status) { 3471 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 3472 sizeof(struct be_cmd_resp_hdr)); 3473 3474 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 3475 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 3476 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3477 } 3478 } 3479 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, 3480 extfat_cmd.dma); 3481 err: 3482 return level; 3483 } 3484 3485 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 3486 struct be_dma_mem *cmd) 3487 { 3488 struct be_mcc_wrb *wrb; 3489 struct be_cmd_req_get_ext_fat_caps *req; 3490 int status; 3491 3492 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3493 return -1; 3494 3495 wrb = wrb_from_mbox(adapter); 3496 if (!wrb) { 3497 status = -EBUSY; 3498 goto err; 3499 } 3500 3501 req = cmd->va; 3502 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3503 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 3504 cmd->size, wrb, cmd); 3505 req->parameter_type = cpu_to_le32(1); 3506 3507 status = be_mbox_notify_wait(adapter); 3508 err: 3509 mutex_unlock(&adapter->mbox_lock); 3510 return status; 3511 } 3512 3513 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 3514 struct be_dma_mem *cmd, 3515 struct be_fat_conf_params *configs) 3516 { 3517 struct be_mcc_wrb *wrb; 3518 struct be_cmd_req_set_ext_fat_caps *req; 3519 int status; 3520 3521 spin_lock_bh(&adapter->mcc_lock); 3522 3523 wrb = wrb_from_mccq(adapter); 3524 if (!wrb) { 3525 status = -EBUSY; 3526 goto err; 3527 } 3528 3529 req = cmd->va; 3530 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 3531 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3532 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 3533 cmd->size, wrb, cmd); 3534 3535 status = be_mcc_notify_wait(adapter); 3536 err: 3537 spin_unlock_bh(&adapter->mcc_lock); 3538 return status; 3539 } 3540 3541 int be_cmd_query_port_name(struct be_adapter *adapter) 3542 { 3543 struct be_cmd_req_get_port_name *req; 3544 struct be_mcc_wrb *wrb; 3545 int status; 3546 3547 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3548 return -1; 3549 3550 wrb = wrb_from_mbox(adapter); 3551 req = embedded_payload(wrb); 3552 3553 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3554 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3555 NULL); 3556 if (!BEx_chip(adapter)) 3557 req->hdr.version = 1; 3558 3559 status = be_mbox_notify_wait(adapter); 3560 if (!status) { 3561 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3562 3563 adapter->port_name = resp->port_name[adapter->hba_port_num]; 3564 } else { 3565 adapter->port_name = adapter->hba_port_num + '0'; 3566 } 3567 3568 mutex_unlock(&adapter->mbox_lock); 3569 return status; 3570 } 3571 3572 /* Descriptor type */ 3573 enum { 3574 FUNC_DESC = 1, 3575 VFT_DESC = 2 3576 }; 3577 3578 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 3579 int desc_type) 3580 { 3581 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3582 struct be_nic_res_desc *nic; 3583 int i; 3584 3585 for (i = 0; i < desc_count; i++) { 3586 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3587 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { 3588 nic = (struct be_nic_res_desc *)hdr; 3589 if (desc_type == FUNC_DESC || 3590 (desc_type == VFT_DESC && 3591 nic->flags & (1 << VFT_SHIFT))) 3592 return nic; 3593 } 3594 3595 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3596 hdr = (void *)hdr + hdr->desc_len; 3597 } 3598 return NULL; 3599 } 3600 3601 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) 3602 { 3603 return be_get_nic_desc(buf, desc_count, VFT_DESC); 3604 } 3605 3606 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) 3607 { 3608 return be_get_nic_desc(buf, desc_count, FUNC_DESC); 3609 } 3610 3611 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, 3612 u32 desc_count) 3613 { 3614 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3615 struct be_pcie_res_desc *pcie; 3616 int i; 3617 3618 for (i = 0; i < desc_count; i++) { 3619 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 3620 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { 3621 pcie = (struct be_pcie_res_desc *)hdr; 3622 if (pcie->pf_num == devfn) 3623 return pcie; 3624 } 3625 3626 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3627 hdr = (void *)hdr + hdr->desc_len; 3628 } 3629 return NULL; 3630 } 3631 3632 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 3633 { 3634 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 3635 int i; 3636 3637 for (i = 0; i < desc_count; i++) { 3638 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 3639 return (struct be_port_res_desc *)hdr; 3640 3641 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 3642 hdr = (void *)hdr + hdr->desc_len; 3643 } 3644 return NULL; 3645 } 3646 3647 static void be_copy_nic_desc(struct be_resources *res, 3648 struct be_nic_res_desc *desc) 3649 { 3650 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 3651 res->max_vlans = le16_to_cpu(desc->vlan_count); 3652 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 3653 res->max_tx_qs = le16_to_cpu(desc->txq_count); 3654 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 3655 res->max_rx_qs = le16_to_cpu(desc->rq_count); 3656 res->max_evt_qs = le16_to_cpu(desc->eq_count); 3657 res->max_cq_count = le16_to_cpu(desc->cq_count); 3658 res->max_iface_count = le16_to_cpu(desc->iface_count); 3659 res->max_mcc_count = le16_to_cpu(desc->mcc_count); 3660 /* Clear flags that driver is not interested in */ 3661 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 3662 BE_IF_CAP_FLAGS_WANT; 3663 } 3664 3665 /* Uses Mbox */ 3666 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 3667 { 3668 struct be_mcc_wrb *wrb; 3669 struct be_cmd_req_get_func_config *req; 3670 int status; 3671 struct be_dma_mem cmd; 3672 3673 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3674 return -1; 3675 3676 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3677 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3678 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3679 GFP_ATOMIC); 3680 if (!cmd.va) { 3681 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3682 status = -ENOMEM; 3683 goto err; 3684 } 3685 3686 wrb = wrb_from_mbox(adapter); 3687 if (!wrb) { 3688 status = -EBUSY; 3689 goto err; 3690 } 3691 3692 req = cmd.va; 3693 3694 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3695 OPCODE_COMMON_GET_FUNC_CONFIG, 3696 cmd.size, wrb, &cmd); 3697 3698 if (skyhawk_chip(adapter)) 3699 req->hdr.version = 1; 3700 3701 status = be_mbox_notify_wait(adapter); 3702 if (!status) { 3703 struct be_cmd_resp_get_func_config *resp = cmd.va; 3704 u32 desc_count = le32_to_cpu(resp->desc_count); 3705 struct be_nic_res_desc *desc; 3706 3707 desc = be_get_func_nic_desc(resp->func_param, desc_count); 3708 if (!desc) { 3709 status = -EINVAL; 3710 goto err; 3711 } 3712 3713 adapter->pf_number = desc->pf_num; 3714 be_copy_nic_desc(res, desc); 3715 } 3716 err: 3717 mutex_unlock(&adapter->mbox_lock); 3718 if (cmd.va) 3719 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 3720 cmd.dma); 3721 return status; 3722 } 3723 3724 /* Will use MBOX only if MCCQ has not been created */ 3725 int be_cmd_get_profile_config(struct be_adapter *adapter, 3726 struct be_resources *res, u8 query, u8 domain) 3727 { 3728 struct be_cmd_resp_get_profile_config *resp; 3729 struct be_cmd_req_get_profile_config *req; 3730 struct be_nic_res_desc *vf_res; 3731 struct be_pcie_res_desc *pcie; 3732 struct be_port_res_desc *port; 3733 struct be_nic_res_desc *nic; 3734 struct be_mcc_wrb wrb = {0}; 3735 struct be_dma_mem cmd; 3736 u16 desc_count; 3737 int status; 3738 3739 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3740 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3741 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3742 GFP_ATOMIC); 3743 if (!cmd.va) 3744 return -ENOMEM; 3745 3746 req = cmd.va; 3747 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3748 OPCODE_COMMON_GET_PROFILE_CONFIG, 3749 cmd.size, &wrb, &cmd); 3750 3751 req->hdr.domain = domain; 3752 if (!lancer_chip(adapter)) 3753 req->hdr.version = 1; 3754 req->type = ACTIVE_PROFILE_TYPE; 3755 3756 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 3757 * descriptors with all bits set to "1" for the fields which can be 3758 * modified using SET_PROFILE_CONFIG cmd. 3759 */ 3760 if (query == RESOURCE_MODIFIABLE) 3761 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; 3762 3763 status = be_cmd_notify_wait(adapter, &wrb); 3764 if (status) 3765 goto err; 3766 3767 resp = cmd.va; 3768 desc_count = le16_to_cpu(resp->desc_count); 3769 3770 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3771 desc_count); 3772 if (pcie) 3773 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3774 3775 port = be_get_port_desc(resp->func_param, desc_count); 3776 if (port) 3777 adapter->mc_type = port->mc_type; 3778 3779 nic = be_get_func_nic_desc(resp->func_param, desc_count); 3780 if (nic) 3781 be_copy_nic_desc(res, nic); 3782 3783 vf_res = be_get_vft_desc(resp->func_param, desc_count); 3784 if (vf_res) 3785 res->vf_if_cap_flags = vf_res->cap_flags; 3786 err: 3787 if (cmd.va) 3788 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 3789 cmd.dma); 3790 return status; 3791 } 3792 3793 /* Will use MBOX only if MCCQ has not been created */ 3794 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 3795 int size, int count, u8 version, u8 domain) 3796 { 3797 struct be_cmd_req_set_profile_config *req; 3798 struct be_mcc_wrb wrb = {0}; 3799 struct be_dma_mem cmd; 3800 int status; 3801 3802 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3803 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3804 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3805 GFP_ATOMIC); 3806 if (!cmd.va) 3807 return -ENOMEM; 3808 3809 req = cmd.va; 3810 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3811 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, 3812 &wrb, &cmd); 3813 req->hdr.version = version; 3814 req->hdr.domain = domain; 3815 req->desc_count = cpu_to_le32(count); 3816 memcpy(req->desc, desc, size); 3817 3818 status = be_cmd_notify_wait(adapter, &wrb); 3819 3820 if (cmd.va) 3821 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 3822 cmd.dma); 3823 return status; 3824 } 3825 3826 /* Mark all fields invalid */ 3827 static void be_reset_nic_desc(struct be_nic_res_desc *nic) 3828 { 3829 memset(nic, 0, sizeof(*nic)); 3830 nic->unicast_mac_count = 0xFFFF; 3831 nic->mcc_count = 0xFFFF; 3832 nic->vlan_count = 0xFFFF; 3833 nic->mcast_mac_count = 0xFFFF; 3834 nic->txq_count = 0xFFFF; 3835 nic->rq_count = 0xFFFF; 3836 nic->rssq_count = 0xFFFF; 3837 nic->lro_count = 0xFFFF; 3838 nic->cq_count = 0xFFFF; 3839 nic->toe_conn_count = 0xFFFF; 3840 nic->eq_count = 0xFFFF; 3841 nic->iface_count = 0xFFFF; 3842 nic->link_param = 0xFF; 3843 nic->channel_id_param = cpu_to_le16(0xF000); 3844 nic->acpi_params = 0xFF; 3845 nic->wol_param = 0x0F; 3846 nic->tunnel_iface_count = 0xFFFF; 3847 nic->direct_tenant_iface_count = 0xFFFF; 3848 nic->bw_min = 0xFFFFFFFF; 3849 nic->bw_max = 0xFFFFFFFF; 3850 } 3851 3852 /* Mark all fields invalid */ 3853 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) 3854 { 3855 memset(pcie, 0, sizeof(*pcie)); 3856 pcie->sriov_state = 0xFF; 3857 pcie->pf_state = 0xFF; 3858 pcie->pf_type = 0xFF; 3859 pcie->num_vfs = 0xFFFF; 3860 } 3861 3862 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 3863 u8 domain) 3864 { 3865 struct be_nic_res_desc nic_desc; 3866 u32 bw_percent; 3867 u16 version = 0; 3868 3869 if (BE3_chip(adapter)) 3870 return be_cmd_set_qos(adapter, max_rate / 10, domain); 3871 3872 be_reset_nic_desc(&nic_desc); 3873 nic_desc.pf_num = adapter->pf_number; 3874 nic_desc.vf_num = domain; 3875 nic_desc.bw_min = 0; 3876 if (lancer_chip(adapter)) { 3877 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3878 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3879 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 3880 (1 << NOSV_SHIFT); 3881 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 3882 } else { 3883 version = 1; 3884 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3885 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3886 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 3887 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 3888 nic_desc.bw_max = cpu_to_le32(bw_percent); 3889 } 3890 3891 return be_cmd_set_profile_config(adapter, &nic_desc, 3892 nic_desc.hdr.desc_len, 3893 1, version, domain); 3894 } 3895 3896 static void be_fill_vf_res_template(struct be_adapter *adapter, 3897 struct be_resources pool_res, 3898 u16 num_vfs, u16 num_vf_qs, 3899 struct be_nic_res_desc *nic_vft) 3900 { 3901 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; 3902 struct be_resources res_mod = {0}; 3903 3904 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, 3905 * which are modifiable using SET_PROFILE_CONFIG cmd. 3906 */ 3907 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); 3908 3909 /* If RSS IFACE capability flags are modifiable for a VF, set the 3910 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if 3911 * more than 1 RSSQ is available for a VF. 3912 * Otherwise, provision only 1 queue pair for VF. 3913 */ 3914 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { 3915 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); 3916 if (num_vf_qs > 1) { 3917 vf_if_cap_flags |= BE_IF_FLAGS_RSS; 3918 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) 3919 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; 3920 } else { 3921 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | 3922 BE_IF_FLAGS_DEFQ_RSS); 3923 } 3924 3925 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); 3926 } else { 3927 num_vf_qs = 1; 3928 } 3929 3930 nic_vft->rq_count = cpu_to_le16(num_vf_qs); 3931 nic_vft->txq_count = cpu_to_le16(num_vf_qs); 3932 nic_vft->rssq_count = cpu_to_le16(num_vf_qs); 3933 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / 3934 (num_vfs + 1)); 3935 3936 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally 3937 * among the PF and it's VFs, if the fields are changeable 3938 */ 3939 if (res_mod.max_uc_mac == FIELD_MODIFIABLE) 3940 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / 3941 (num_vfs + 1)); 3942 3943 if (res_mod.max_vlans == FIELD_MODIFIABLE) 3944 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / 3945 (num_vfs + 1)); 3946 3947 if (res_mod.max_iface_count == FIELD_MODIFIABLE) 3948 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / 3949 (num_vfs + 1)); 3950 3951 if (res_mod.max_mcc_count == FIELD_MODIFIABLE) 3952 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / 3953 (num_vfs + 1)); 3954 } 3955 3956 int be_cmd_set_sriov_config(struct be_adapter *adapter, 3957 struct be_resources pool_res, u16 num_vfs, 3958 u16 num_vf_qs) 3959 { 3960 struct { 3961 struct be_pcie_res_desc pcie; 3962 struct be_nic_res_desc nic_vft; 3963 } __packed desc; 3964 3965 /* PF PCIE descriptor */ 3966 be_reset_pcie_desc(&desc.pcie); 3967 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; 3968 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3969 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3970 desc.pcie.pf_num = adapter->pdev->devfn; 3971 desc.pcie.sriov_state = num_vfs ? 1 : 0; 3972 desc.pcie.num_vfs = cpu_to_le16(num_vfs); 3973 3974 /* VF NIC Template descriptor */ 3975 be_reset_nic_desc(&desc.nic_vft); 3976 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 3977 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 3978 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 3979 desc.nic_vft.pf_num = adapter->pdev->devfn; 3980 desc.nic_vft.vf_num = 0; 3981 3982 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, 3983 &desc.nic_vft); 3984 3985 return be_cmd_set_profile_config(adapter, &desc, 3986 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 3987 } 3988 3989 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 3990 { 3991 struct be_mcc_wrb *wrb; 3992 struct be_cmd_req_manage_iface_filters *req; 3993 int status; 3994 3995 if (iface == 0xFFFFFFFF) 3996 return -1; 3997 3998 spin_lock_bh(&adapter->mcc_lock); 3999 4000 wrb = wrb_from_mccq(adapter); 4001 if (!wrb) { 4002 status = -EBUSY; 4003 goto err; 4004 } 4005 req = embedded_payload(wrb); 4006 4007 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4008 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 4009 wrb, NULL); 4010 req->op = op; 4011 req->target_iface_id = cpu_to_le32(iface); 4012 4013 status = be_mcc_notify_wait(adapter); 4014 err: 4015 spin_unlock_bh(&adapter->mcc_lock); 4016 return status; 4017 } 4018 4019 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 4020 { 4021 struct be_port_res_desc port_desc; 4022 4023 memset(&port_desc, 0, sizeof(port_desc)); 4024 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 4025 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4026 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 4027 port_desc.link_num = adapter->hba_port_num; 4028 if (port) { 4029 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 4030 (1 << RCVID_SHIFT); 4031 port_desc.nv_port = swab16(port); 4032 } else { 4033 port_desc.nv_flags = NV_TYPE_DISABLED; 4034 port_desc.nv_port = 0; 4035 } 4036 4037 return be_cmd_set_profile_config(adapter, &port_desc, 4038 RESOURCE_DESC_SIZE_V1, 1, 1, 0); 4039 } 4040 4041 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 4042 int vf_num) 4043 { 4044 struct be_mcc_wrb *wrb; 4045 struct be_cmd_req_get_iface_list *req; 4046 struct be_cmd_resp_get_iface_list *resp; 4047 int status; 4048 4049 spin_lock_bh(&adapter->mcc_lock); 4050 4051 wrb = wrb_from_mccq(adapter); 4052 if (!wrb) { 4053 status = -EBUSY; 4054 goto err; 4055 } 4056 req = embedded_payload(wrb); 4057 4058 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4059 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 4060 wrb, NULL); 4061 req->hdr.domain = vf_num + 1; 4062 4063 status = be_mcc_notify_wait(adapter); 4064 if (!status) { 4065 resp = (struct be_cmd_resp_get_iface_list *)req; 4066 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 4067 } 4068 4069 err: 4070 spin_unlock_bh(&adapter->mcc_lock); 4071 return status; 4072 } 4073 4074 static int lancer_wait_idle(struct be_adapter *adapter) 4075 { 4076 #define SLIPORT_IDLE_TIMEOUT 30 4077 u32 reg_val; 4078 int status = 0, i; 4079 4080 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 4081 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 4082 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 4083 break; 4084 4085 ssleep(1); 4086 } 4087 4088 if (i == SLIPORT_IDLE_TIMEOUT) 4089 status = -1; 4090 4091 return status; 4092 } 4093 4094 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 4095 { 4096 int status = 0; 4097 4098 status = lancer_wait_idle(adapter); 4099 if (status) 4100 return status; 4101 4102 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 4103 4104 return status; 4105 } 4106 4107 /* Routine to check whether dump image is present or not */ 4108 bool dump_present(struct be_adapter *adapter) 4109 { 4110 u32 sliport_status = 0; 4111 4112 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 4113 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 4114 } 4115 4116 int lancer_initiate_dump(struct be_adapter *adapter) 4117 { 4118 struct device *dev = &adapter->pdev->dev; 4119 int status; 4120 4121 if (dump_present(adapter)) { 4122 dev_info(dev, "Previous dump not cleared, not forcing dump\n"); 4123 return -EEXIST; 4124 } 4125 4126 /* give firmware reset and diagnostic dump */ 4127 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 4128 PHYSDEV_CONTROL_DD_MASK); 4129 if (status < 0) { 4130 dev_err(dev, "FW reset failed\n"); 4131 return status; 4132 } 4133 4134 status = lancer_wait_idle(adapter); 4135 if (status) 4136 return status; 4137 4138 if (!dump_present(adapter)) { 4139 dev_err(dev, "FW dump not generated\n"); 4140 return -EIO; 4141 } 4142 4143 return 0; 4144 } 4145 4146 int lancer_delete_dump(struct be_adapter *adapter) 4147 { 4148 int status; 4149 4150 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); 4151 return be_cmd_status(status); 4152 } 4153 4154 /* Uses sync mcc */ 4155 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 4156 { 4157 struct be_mcc_wrb *wrb; 4158 struct be_cmd_enable_disable_vf *req; 4159 int status; 4160 4161 if (BEx_chip(adapter)) 4162 return 0; 4163 4164 spin_lock_bh(&adapter->mcc_lock); 4165 4166 wrb = wrb_from_mccq(adapter); 4167 if (!wrb) { 4168 status = -EBUSY; 4169 goto err; 4170 } 4171 4172 req = embedded_payload(wrb); 4173 4174 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4175 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 4176 wrb, NULL); 4177 4178 req->hdr.domain = domain; 4179 req->enable = 1; 4180 status = be_mcc_notify_wait(adapter); 4181 err: 4182 spin_unlock_bh(&adapter->mcc_lock); 4183 return status; 4184 } 4185 4186 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 4187 { 4188 struct be_mcc_wrb *wrb; 4189 struct be_cmd_req_intr_set *req; 4190 int status; 4191 4192 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4193 return -1; 4194 4195 wrb = wrb_from_mbox(adapter); 4196 4197 req = embedded_payload(wrb); 4198 4199 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4200 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 4201 wrb, NULL); 4202 4203 req->intr_enabled = intr_enable; 4204 4205 status = be_mbox_notify_wait(adapter); 4206 4207 mutex_unlock(&adapter->mbox_lock); 4208 return status; 4209 } 4210 4211 /* Uses MBOX */ 4212 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 4213 { 4214 struct be_cmd_req_get_active_profile *req; 4215 struct be_mcc_wrb *wrb; 4216 int status; 4217 4218 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4219 return -1; 4220 4221 wrb = wrb_from_mbox(adapter); 4222 if (!wrb) { 4223 status = -EBUSY; 4224 goto err; 4225 } 4226 4227 req = embedded_payload(wrb); 4228 4229 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4230 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 4231 wrb, NULL); 4232 4233 status = be_mbox_notify_wait(adapter); 4234 if (!status) { 4235 struct be_cmd_resp_get_active_profile *resp = 4236 embedded_payload(wrb); 4237 4238 *profile_id = le16_to_cpu(resp->active_profile_id); 4239 } 4240 4241 err: 4242 mutex_unlock(&adapter->mbox_lock); 4243 return status; 4244 } 4245 4246 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 4247 int link_state, u8 domain) 4248 { 4249 struct be_mcc_wrb *wrb; 4250 struct be_cmd_req_set_ll_link *req; 4251 int status; 4252 4253 if (BEx_chip(adapter) || lancer_chip(adapter)) 4254 return -EOPNOTSUPP; 4255 4256 spin_lock_bh(&adapter->mcc_lock); 4257 4258 wrb = wrb_from_mccq(adapter); 4259 if (!wrb) { 4260 status = -EBUSY; 4261 goto err; 4262 } 4263 4264 req = embedded_payload(wrb); 4265 4266 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4267 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 4268 sizeof(*req), wrb, NULL); 4269 4270 req->hdr.version = 1; 4271 req->hdr.domain = domain; 4272 4273 if (link_state == IFLA_VF_LINK_STATE_ENABLE) 4274 req->link_config |= 1; 4275 4276 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4277 req->link_config |= 1 << PLINK_TRACK_SHIFT; 4278 4279 status = be_mcc_notify_wait(adapter); 4280 err: 4281 spin_unlock_bh(&adapter->mcc_lock); 4282 return status; 4283 } 4284 4285 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 4286 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 4287 { 4288 struct be_adapter *adapter = netdev_priv(netdev_handle); 4289 struct be_mcc_wrb *wrb; 4290 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; 4291 struct be_cmd_req_hdr *req; 4292 struct be_cmd_resp_hdr *resp; 4293 int status; 4294 4295 spin_lock_bh(&adapter->mcc_lock); 4296 4297 wrb = wrb_from_mccq(adapter); 4298 if (!wrb) { 4299 status = -EBUSY; 4300 goto err; 4301 } 4302 req = embedded_payload(wrb); 4303 resp = embedded_payload(wrb); 4304 4305 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 4306 hdr->opcode, wrb_payload_size, wrb, NULL); 4307 memcpy(req, wrb_payload, wrb_payload_size); 4308 be_dws_cpu_to_le(req, wrb_payload_size); 4309 4310 status = be_mcc_notify_wait(adapter); 4311 if (cmd_status) 4312 *cmd_status = (status & 0xffff); 4313 if (ext_status) 4314 *ext_status = 0; 4315 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 4316 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 4317 err: 4318 spin_unlock_bh(&adapter->mcc_lock); 4319 return status; 4320 } 4321 EXPORT_SYMBOL(be_roce_mcc_cmd); 4322