/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1 ms for firmware to respond */
		msleep(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

void
qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
{
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
		adapter->fw_hal_version);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
		return;
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "CDRP response timeout.\n");
		cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		switch (cmd->rsp.cmd) {
		case QLCNIC_RCODE_INVALID_ARGS:
			dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
				cmd->rsp.cmd);
			break;
		case QLCNIC_RCODE_NOT_SUPPORTED:
		case QLCNIC_RCODE_NOT_IMPL:
			dev_err(&pdev->dev,
				"CDRP command not supported: 0x%x.\n",
				cmd->rsp.cmd);
			break;
		case QLCNIC_RCODE_NOT_PERMITTED:
			dev_err(&pdev->dev,
				"CDRP requested action not permitted: 0x%x.\n",
				cmd->rsp.cmd);
			break;
		case QLCNIC_RCODE_INVALID:
			dev_err(&pdev->dev,
				"CDRP invalid or unknown cmd received: 0x%x.\n",
				cmd->rsp.cmd);
			break;
		case QLCNIC_RCODE_TIMEOUT:
			dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
				cmd->rsp.cmd);
			break;
		default:
			dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
				cmd->rsp.cmd);
		}
	} else if (rsp == QLCNIC_CDRP_RSP_OK) {
		cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = QLCRD32(adapter,
				QLCNIC_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = QLCRD32(adapter,
				QLCNIC_ARG3_CRB_OFFSET);
	}
	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
}
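
/*
 * Example (illustrative sketch only, mirroring the callers later in this
 * file): a firmware command is issued by zeroing a qlcnic_cmd_args, filling
 * in the CDRP opcode and request arguments, calling qlcnic_issue_cmd() and
 * checking rsp.cmd against QLCNIC_RCODE_SUCCESS:
 *
 *	struct qlcnic_cmd_args cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
 *	cmd.req.arg1 = recv_ctx->context_id;
 *	cmd.req.arg2 = new_mtu;		// hypothetical local variable
 *	qlcnic_issue_cmd(adapter, &cmd);
 *	if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS)
 *		return -EIO;
 *
 * Pre-setting rsp.arg1/arg2/arg3 to a non-zero value asks qlcnic_issue_cmd()
 * to read the corresponding response registers back (see
 * qlcnic_get_mac_address() below).
 */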

static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
{
	uint64_t sum = 0;
	int count = temp_size / sizeof(uint32_t);
	while (count-- > 0)
		sum += *temp_buffer++;
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~sum;
}
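
/*
 * Note: qlcnic_temp_checksum() folds the 64-bit sum of the template words
 * into 32 bits with end-around carry and returns its one's complement, so a
 * template whose words sum to 0xffffffff (i.e. one carrying a consistent
 * checksum word) yields 0.  The caller below treats any non-zero result as
 * a corrupt template.
 */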

int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err, i;
	u16 temp_size;
	void *tmp_addr;
	u32 version, csum, *template, *tmp_buf;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
	dma_addr_t tmp_addr_t = 0;

	ahw = adapter->ahw;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
		dev_info(&adapter->pdev->dev,
			"Can't get template size %d\n", cmd.rsp.cmd);
		err = -EIO;
		return err;
	}
	temp_size = cmd.rsp.arg2;
	version = cmd.rsp.arg3;
	if (!temp_size)
		return -EIO;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
			&tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr) {
		dev_err(&adapter->pdev->dev,
			"Can't get memory for FW dump template\n");
		return -ENOMEM;
	}
	memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
	cmd.req.arg1 = LSD(tmp_addr_t);
	cmd.req.arg2 = MSD(tmp_addr_t);
	cmd.req.arg3 = temp_size;
	qlcnic_issue_cmd(adapter, &cmd);

	err = cmd.rsp.cmd;
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get mini dump template header %d\n", err);
		err = -EIO;
		goto error;
	}
	tmp_tmpl = tmp_addr;
	csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		err = -EIO;
		goto error;
	}
	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
	if (!ahw->fw_dump.tmpl_hdr) {
		err = -EIO;
		goto error;
	}
	tmp_buf = tmp_addr;
	template = (u32 *) ahw->fw_dump.tmpl_hdr;
	for (i = 0; i < temp_size / sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);

	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
	ahw->fw_dump.enable = 1;
error:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
	return err;
}

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = mtu;
	cmd.req.arg3 = 0;
	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
		qlcnic_issue_cmd(adapter, &cmd);
		if (cmd.rsp.cmd) {
			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
			return -EIO;
		}
	}

	return 0;
}
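
/*
 * The RX context request built below is laid out as a fixed
 * qlcnic_hostrq_rx_ctx header followed, inside its data[] area, by one
 * qlcnic_hostrq_rds_ring descriptor per RDS ring at rds_ring_offset and one
 * qlcnic_hostrq_sds_ring descriptor per SDS ring at sds_ring_offset.  The
 * whole request is handed to firmware by DMA address (arg1 = upper 32 bits,
 * arg2 = lower 32 bits, arg3 = size of the request).
 */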

static int
qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
			nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
			nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
			| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
		cap |= QLCNIC_CAP0_LRO_MSS;

	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
			msix_handler);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32) (phys_addr >> 32);
	cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
		cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}
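
/*
 * Note: the card response parsed above reports per-ring register locations
 * as CRB offsets; they become usable addresses by adding
 * adapter->ahw->pci_base0 (the mapped BAR0), giving crb_rcv_producer for
 * each RDS ring and crb_sts_consumer/crb_intr_mask for each SDS ring.
 */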

static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}

static int
qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	struct qlcnic_cmd_args cmd;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
			QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;

		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
		rsp_phys_addr);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->tx_context_id;
	cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
}

int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = config;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
	qlcnic_issue_cmd(adapter, &cmd);

	return cmd.rsp.cmd;
}

int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err;
	int ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
		sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
	if (tx_ring->hw_consumer == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx consumer\n");
		return -ENOMEM;
	}

	/* cmd desc ring */
	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr, GFP_KERNEL);

	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate rds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate sds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}

int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
	int err;

	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
	if (err)
		return err;

	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
	if (err) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		return err;
	}

	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
	return 0;
}
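
/*
 * Typical ordering (illustrative; the callers live outside this file): the
 * host descriptor rings are allocated with qlcnic_alloc_hw_resources()
 * before qlcnic_fw_create_ctx() hands them to firmware, and teardown runs
 * in reverse, qlcnic_fw_destroy_ctx() followed by
 * qlcnic_free_hw_resources().
 */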

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		qlcnic_fw_cmd_destroy_tx_ctx(adapter);

		/* Allow dma queues to drain after context reset */
		msleep(20);
	}
}

void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	tx_ring = adapter->tx_ring;
	if (tx_ring->hw_consumer != NULL) {
		dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
				tx_ring->hw_consumer,
				tx_ring->hw_cons_phys_addr);
		tx_ring->hw_consumer = NULL;
	}

	if (tx_ring->desc_head != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}

/* Get MAC address of a NIC partition */
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err;
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
	cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
	cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS)
		qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
	else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address %d\n", err);
		err = -EIO;
	}

	return err;
}
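
/*
 * Example (illustrative sketch): callers hand in a buffer of at least
 * ETH_ALEN bytes and treat a zero return as success:
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (qlcnic_get_mac_address(adapter, mac) == 0)
 *		... program mac into the netdev ...
 */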

/* Get info of a NIC partition */
int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
		struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	struct qlcnic_info *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
	cmd.req.arg1 = MSD(nic_dma_t);
	cmd.req.arg2 = LSD(nic_dma_t);
	cmd.req.arg3 = (func_id << 16 | nic_size);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);

		dev_info(&adapter->pdev->dev,
			"phy port: %d switch_mode: %d,\n"
			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
			"\tmax_tx_bw: 0x%x max_mtu: 0x%x, capabilities: 0x%x\n",
			npar_info->phys_port, npar_info->switch_mode,
			npar_info->max_tx_ques, npar_info->max_rx_ques,
			npar_info->min_tx_bw, npar_info->max_tx_bw,
			npar_info->max_mtu, npar_info->capabilities);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}
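
/*
 * Note: the NIC partition get/set commands above and below exchange a
 * little-endian struct qlcnic_info through a coherent DMA buffer; the
 * buffer's DMA address is split across arg1 (upper 32 bits, MSD) and arg2
 * (lower 32 bits, LSD), and arg3 carries the pci function in its upper
 * 16 bits and the buffer size in its lower 16 bits.
 */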

/* Configure a NIC partition */
int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info);

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
	cmd.req.arg1 = MSD(nic_dma_t);
	cmd.req.arg2 = LSD(nic_dma_t);
	cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}

/* Get PCI Info of a partition */
int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
		struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
			&pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = pci_info_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
	cmd.req.arg1 = MSD(pci_info_dma_t);
	cmd.req.arg2 = LSD(pci_info_dma_t);
	cmd.req.arg3 = pci_size;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
		pci_info_dma_t);
	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
		u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
	cmd.req.arg1 = arg1;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring for pci func %d on eswitch %d\n",
			pci_func, id);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch %d for port mirroring of pci func %d\n",
			id, pci_func);
	}

	return err;
}

int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
	struct __qlcnic_esw_statistics *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
	    func != adapter->ahw->pci_func) {
		dev_err(&adapter->pdev->dev,
			"Not privileged to query stats for func=%d\n", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
	cmd.req.arg1 = arg1;
	cmd.req.arg2 = MSD(stats_dma_t);
	cmd.req.arg3 = LSD(stats_dma_t);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
				le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
				le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}
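
/*
 * Note: for the eswitch statistics query above, arg1 packs the request as
 * the function/port number in the low byte, QLCNIC_STATS_VERSION shifted to
 * bit 8, the query type (QLCNIC_STATS_PORT or QLCNIC_STATS_ESWITCH) at
 * bit 12, rx_tx at bit 15 and the size of the DMA statistics buffer in the
 * upper 16 bits.  qlcnic_clear_esw_stats() below reuses the same encoding
 * with BIT_14 additionally set and no DMA buffer supplied.
 */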

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
		struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev,
			"%s: Unable to allocate memory.\n", __func__);
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
	cmd.req.arg1 = stats_size << 16;
	cmd.req.arg2 = MSD(stats_dma_t);
	cmd.req.arg3 = LSD(stats_dma_t);

	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
				le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
				le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
				le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
				le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
				le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
				le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error =
				le64_to_cpu(stats->mac_rx_crc_error);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}

int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	/* clear the id/version/size header words; every counter is set
	 * explicitly to QLCNIC_STATS_NOT_AVAIL just below */
	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				port_stats.numbytes);
		ret = 0;
	}
	return ret;
}
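
/*
 * Note: qlcnic_get_eswitch_stats() above aggregates per-function port
 * statistics: every NPAR whose phy_port matches the requested eswitch is
 * queried with qlcnic_get_port_stats() and folded in via
 * QLCNIC_ADD_ESW_STATS(); the function returns 0 only if at least one
 * per-port query succeeded.
 */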

int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
		const u8 port, const u8 rx_tx)
{
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
	cmd.req.arg1 = arg1;
	qlcnic_issue_cmd(adapter, &cmd);
	return cmd.rsp.cmd;

err_ret:
	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d "
		"rx_tx=%d\n", func_esw, port, rx_tx);
	return -EIO;
}

static int
__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
		u32 *arg1, u32 *arg2)
{
	int err = -EIO;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;
	pci_func = (*arg1 >> 8);

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
	cmd.req.arg1 = *arg1;
	cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
	qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg1;
	*arg2 = cmd.rsp.arg2;
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		dev_info(&adapter->pdev->dev,
			"eSwitch port config for pci func %d\n", pci_func);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get eswitch port config for pci func %d\n",
			pci_func);
	}
	return err;
}

/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	int err = -EIO;
	u32 arg1, arg2 = 0;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;
	pci_func = esw_cfg->pci_func;
	arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
	cmd.req.arg1 = arg1;
	cmd.req.arg2 = arg2;
	qlcnic_issue_cmd(adapter, &cmd);

	err = cmd.rsp.cmd;
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure eswitch pci func %d\n", pci_func);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch for pci func %d\n", pci_func);
	}

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	u8 phy_port;
	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
	else
		phy_port = adapter->physical_port;
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}
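
/*
 * Example (illustrative sketch): a management function can read the current
 * per-function eswitch settings, adjust them and apply them again:
 *
 *	struct qlcnic_esw_func_cfg esw_cfg;
 *
 *	memset(&esw_cfg, 0, sizeof(esw_cfg));
 *	esw_cfg.pci_func = pci_func;
 *	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
 *		return -EIO;
 *	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
 *	esw_cfg.promisc_mode = 1;
 *	if (qlcnic_config_switch_port(adapter, &esw_cfg))
 *		return -EIO;
 */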