/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"

/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 64;

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error detected in card. Cannot issue commands\n");
		return;
	}

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
		     (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (compl->tag0 ==
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
			struct be_mcc_wrb *mcc_wrb =
				queue_index_node(&adapter->mcc_obj.q,
						compl->tag1);
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				embedded_payload(mcc_wrb);
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
				"permitted to execute this cmd (opcode %d)\n",
				compl->tag0);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
				"status %d, extd-status %d\n",
				compl->tag0, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

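/* A sketch of how compl->status is unpacked above (per the CQE_STATUS_*
 * shift/mask definitions in be_cmds.h): the completion status sits in the
 * low half of the word and the extended status in the high half, i.e.
 *
 *	status word:  [31..16] extd_status | [15..0] compl_status
 */
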
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				    (struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

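/* A usage sketch: callers are expected to drain the CQ with be_process_mcc()
 * and then ring the CQ doorbell for however many entries were consumed,
 * exactly as be_mcc_wait_compl() below does:
 *
 *	num = be_process_mcc(adapter, &status);
 *	if (num)
 *		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 *
 * rearm_cq is false once be_async_mcc_disable() has run, so during teardown
 * the queue is drained without being re-armed.
 */
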
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card. Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			if (!lancer_chip(adapter))
				be_detect_dump_ue(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

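/* A sketch of the doorbell encoding used above (inferred from the shifts):
 * each 32-bit write carries a slice of the mailbox DMA address in bits 2-31,
 * with MPU_MAILBOX_DB_HI_MASK distinguishing the two writes:
 *
 *	write 1: HI flag set,   addr bits 34-63 at bits 2-31
 *	write 2: HI flag clear, addr bits  4-33 at bits 2-31
 *
 * Address bits 0-3 are never transferred, which implies the mailbox memory
 * must be at least 16-byte aligned.
 */
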
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}


static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

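/* Worked example for eq_delay_to_mult(): a 96us delay gives
 * interrupt_rate = 1000000 / 96 = 10416, so
 * multiplier = round((651042 - 10416) / 10416) = round(61.5) = 62.
 * A delay of 0 disables moderation (multiplier 0); very long delays
 * saturate at the hardware maximum of 1023.
 */
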
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

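/* The EQ "count" field above is log2 of the ring size in 256-entry units:
 * e.g. an EQ with eq->len = 1024 entries is encoded as
 * __ilog2_u32(1024/256) = 2, matching the hardware convention that ring
 * sizes are powers of two.
 */
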
/* Uses MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

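/* The three commands above follow the canonical synchronous-MCC skeleton
 * used throughout this file; a sketch:
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);		// NULL when the ring is full
 *	req = embedded_payload(wrb);
 *	be_wrb_hdr_prepare(...);		// WRB header (embedded/SGE count)
 *	be_cmd_hdr_prepare(...);		// cmd header (subsystem/opcode)
 *	... fill request ...
 *	status = be_mcc_notify_wait(adapter);	// post + poll for completion
 *	spin_unlock_bh(&adapter->mcc_lock);
 */
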
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

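/* Worked example for be_encoded_q_len(): for a power-of-two ring,
 * fls(q_len) = log2(q_len) + 1, so a 128-entry MCCQ encodes as fls(128) = 8.
 * The largest ring size (fls == 16, i.e. 32K entries) appears to wrap to
 * the special encoding 0.
 */
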
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

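/* Fallback rationale (a reading of the code above): MCC_CREATE_EXT lets the
 * driver subscribe to async link-state and Group 5 events (bitmap 0x22 =
 * bits 1 and 5 set). Pre-2.102.235.0 firmware rejects the extended variant,
 * so the driver falls back to plain MCC_CREATE and loses the Group 5 CoS
 * notifications, hence the warning about NIC/FCoE priority conflicts.
 */
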
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

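/* frag_size above is sent as a log2: for a power-of-two fragment size,
 * fls(frag_size) - 1 == log2(frag_size), e.g. 2048-byte buffers encode
 * as fls(2048) - 1 = 11.
 */
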
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	if (!status)
		q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
		sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	if (mac)
		memcpy(req->mac_addr, mac, ETH_ALEN);
	else
		req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (mac)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (!interface_id)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	struct be_sge *sge;
	int status = 0;

	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);

	if (adapter->generation == BE_GEN3)
		hdr->version = 1;

	wrb->tag1 = CMD_SUBSYSTEM_ETH;
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

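/* Note on non-embedded commands such as the one above: the request/response
 * live in a caller-supplied DMA buffer described by a scatter-gather entry,
 * and the command is fired without waiting (be_mcc_notify() only). Tagging
 * the WRB with tag1 = CMD_SUBSYSTEM_ETH is what lets be_mcc_compl_process()
 * recognize the completion later and call be_parse_stats().
 */
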
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
			OPCODE_ETH_GET_PPORT_STATS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
	req->cmd_params.params.reset_stats = 0;

	wrb->tag1 = CMD_SUBSYSTEM_ETH;
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
			u16 *link_speed, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_speed = le16_to_cpu(resp->link_speed);
			if (mac_speed)
				*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses asynchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	u16 mccq_index;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	mccq_index = adapter->mcc_obj.q.head;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));

	wrb->tag1 = mccq_index;

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

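/* The tag1 = mccq_index trick above: the command completes asynchronously,
 * so the driver records the WRB's position in the ring before posting.
 * When the completion arrives, be_mcc_compl_process() uses
 * queue_index_node(&adapter->mcc_obj.q, compl->tag1) to find the original
 * WRB and read on_die_temperature out of its embedded response.
 */
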
/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MANAGE_FAT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	struct be_sge *sge;
	u32 offset = 0, total_size, buf_size,
		log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_fat_cmd.size,
			&get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
		"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;
		sge = nonembedded_sgl(wrb);

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_hdr_prepare(wrb, payload_len, false, 1,
				OPCODE_COMMON_MANAGE_FAT);

		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len);

		sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
		sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(get_fat_cmd.size);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			memcpy(buf + offset,
				resp->data_buffer,
				le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			get_fat_cmd.va,
			get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}

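/* FAT retrieval notes (inferred from the two functions above): the log is
 * pulled in 60KB chunks because that is the per-command buffer size
 * allocated here. Both read_log_offset and be_cmd_get_reg_len() skip or
 * subtract sizeof(u32), which suggests the first dword of the on-flash log
 * holds its own length rather than log data.
 */
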
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to a specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

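/* be_cmd_modify_eqd() is fire-and-forget: it posts with be_mcc_notify() and
 * never waits, so a success return only means the request was queued. That
 * seems acceptable here since a lost EQ-delay update is a transient tuning
 * miss rather than an error the caller could act on.
 */
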
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
				OPCODE_COMMON_NTWK_RX_FILTER);

	memset(req, 0, sizeof(*req));
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);
		req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

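/* Why the two commands above use the mailbox rather than the MCCQ: they run
 * during early init and function reset, i.e. at points where the MCC queue
 * may not exist yet, so the only transport available is the bootstrap
 * mailbox protected by mbox_lock.
 */
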
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
			0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_ETH_RSS_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

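/* As with the ring encodings, cpu_table_size_log2 is a log2: for a
 * power-of-two indirection table, fls(table_size) - 1 == log2(table_size),
 * e.g. a 128-entry table encodes as 7. The hash key is a fixed constant
 * rather than a randomized (Toeplitz-style) key, so flow spreading is
 * deterministic across reboots.
 */
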
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 data_size, u32 data_offset, const char *obj_name,
			u32 *data_written, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
			true, 1, OPCODE_COMMON_WRITE_OBJECT);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object));

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
	} else {
		*addn_status = resp->additional_status;
		status = resp->status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

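/* Flash-update completion flow (tying the two write commands above back to
 * be_mcc_compl_process()): both are posted asynchronously and tagged with
 * tag0 = WRITE_OBJECT/WRITE_FLASHROM, tag1 = CMD_SUBSYSTEM_COMMON; the
 * completion handler matches those tags, stores the result in
 * adapter->flash_status and signals adapter->flash_compl, on which the
 * callers here block (12s and 40s timeouts respectively).
 */
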
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

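/* Note on be_cmd_loopback_test(): even when the MCC transaction itself
 * succeeds, status is overwritten with resp->status, i.e. the outcome of
 * the firmware-run test, so callers see the test verdict rather than mere
 * transport success. src_port == dest_port because the frame is looped
 * back on the same physical port.
 */
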
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Replicate the 8-byte pattern, least-significant byte first */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter,
				struct be_phy_info *phy_info)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	struct be_dma_mem cmd;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
					&cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
	sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd.size);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* The response payload follows the request header in the
		 * same DMA buffer */
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);
		phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
		phy_info->interface_type =
				le16_to_cpu(resp_phy_info->interface_type);
	}
	pci_free_consistent(adapter->pdev, cmd.size,
				cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
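/* Uses synchronous mcc. Programs a transmit rate limit for the given
 * domain; only the NIC rate field (max_bps_nic) is marked valid here */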
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	struct be_sge *sge;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
						&attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure\n");
		return -ENOMEM;
	}

	/* Don't leak the DMA buffer allocated above if we can't
	 * take the mbox lock */
	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		status = -1;
		goto free_cmd;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, payload_len, false, 1,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
	sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
	sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(attribs_cmd.size);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
free_cmd:
	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
				attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}