/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

/* Mailbox command metadata: {command code, request arg count, response arg count} */
static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_SET_DRV_VER, 4, 1},
};

static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
	return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
	       (0xcafe << 16);
}

/* Allocate mailbox registers */
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
			       struct qlcnic_adapter *adapter, u32 type)
{
	int i, size;
	const struct qlcnic_mailbox_metadata *mbx_tbl;

	mbx_tbl = qlcnic_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_mbx_tbl);
	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = type;
			break;
		}
	}
	return 0;
}

/* Free up mailbox registers */
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
{
	kfree(cmd->req.arg);
	cmd->req.arg = NULL;
	kfree(cmd->rsp.arg);
	cmd->rsp.arg = NULL;
}

static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
	int i;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].pci_func == pci_func)
			return i;
	}

	return -1;
}

static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

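/*
 * Issue a CDRP command to the firmware: write the signature and request
 * arguments into the CRB mailbox registers, kick off the command, poll
 * for completion and read back the response arguments. The adapter API
 * lock serializes access to the shared CRB window; the firmware return
 * code is left in cmd->rsp.arg[0].
 */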
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			  struct qlcnic_cmd_args *cmd)
{
	int i;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	const char *fmt;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
		switch (cmd->rsp.arg[0]) {
		case QLCNIC_RCODE_INVALID_ARGS:
			fmt = "CDRP invalid args: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_SUPPORTED:
		case QLCNIC_RCODE_NOT_IMPL:
			fmt = "CDRP command not supported: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_PERMITTED:
			fmt = "CDRP requested action not permitted: [%d]\n";
			break;
		case QLCNIC_RCODE_INVALID:
			fmt = "CDRP invalid or unknown cmd received: [%d]\n";
			break;
		case QLCNIC_RCODE_TIMEOUT:
			fmt = "CDRP command timeout: [%d]\n";
			break;
		default:
			fmt = "CDRP command failed: [%d]\n";
			break;
		}
		dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
	} else if (rsp == QLCNIC_CDRP_RSP_OK) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
	}

	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}

int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;
	u32 arg1, arg2, arg3;
	char drv_string[12];
	int err = 0;

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
		 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
		 _QLCNIC_LINUX_SUBVERSION);

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER);
	memcpy(&arg1, drv_string, sizeof(u32));
	memcpy(&arg2, drv_string + 4, sizeof(u32));
	memcpy(&arg3, drv_string + 8, sizeof(u32));

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	cmd.req.arg[3] = arg3;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_info(&adapter->pdev->dev,
			 "Failed to set driver version in firmware\n");
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);

	return err;
}

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

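/*
 * Create the receive context in firmware. The host builds a request block
 * describing every RDS (receive descriptor) and SDS (status descriptor)
 * ring in a DMA-coherent buffer and hands its address to firmware through
 * the mailbox; firmware fills a response block with the CRB offsets the
 * driver must use for the ring producer/consumer and interrupt mask
 * registers.
 */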
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	u16 temp_u16;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
				   nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
				     nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
	       | QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
	prq->valid_field_offset = cpu_to_le16(temp_u16);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
	      (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
		    &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
		    &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
	qlcnic_free_mbx_args(&cmd);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}

void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}

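/*
 * Create a transmit context in firmware for one Tx ring. As with the Rx
 * context, request and response blocks are exchanged through DMA-coherent
 * buffers; on success firmware returns the context id and the CRB offset
 * of the command producer register for this ring.
 */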
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_tx_ring *tx_ring,
				     int ring)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	struct qlcnic_cmd_args cmd;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				     &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				      &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;

	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->msi_index = 0;

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);
	qlcnic_free_mbx_args(&cmd);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring)
{
	struct qlcnic_cmd_args cmd;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);

	cmd.req.arg[1] = tx_ring->ctx_id;
	if (qlcnic_issue_cmd(adapter, &cmd))
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	qlcnic_free_mbx_args(&cmd);
}

int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	int err;
	struct qlcnic_cmd_args cmd;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
	cmd.req.arg[1] = config;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;
}

int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err, ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	__le32 *ptr;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
						   &tx_ring->hw_cons_phys_addr,
						   GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;

		tx_ring->hw_consumer = ptr;
		/* cmd desc ring */
		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
					  &tx_ring->phys_addr,
					  GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  &rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  &sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}

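/*
 * Create all firmware contexts for the adapter: the Rx context first,
 * then one Tx context per driver Tx ring. If a Tx context fails, the Rx
 * context and any Tx contexts created so far are destroyed again. For
 * 83xx adapters in MSI-X mode the interrupt resources are configured in
 * firmware before the contexts are created.
 */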
int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
	int i, err, ring;

	if (dev->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(dev->pdev);
		dev->flags &= ~QLCNIC_NEED_FLR;
	}

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
			err = qlcnic_83xx_config_intrpt(dev, 1);
			if (err)
				return err;
		}
	}

	err = qlcnic_fw_cmd_create_rx_ctx(dev);
	if (err)
		goto err_out;

	for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
		err = qlcnic_fw_cmd_create_tx_ctx(dev,
						  &dev->tx_ring[ring],
						  ring);
		if (err) {
			qlcnic_fw_cmd_del_rx_ctx(dev);
			if (ring == 0)
				goto err_out;

			for (i = 0; i < ring; i++)
				qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);

			goto err_out;
		}
	}

	set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
	return 0;

err_out:
	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
			qlcnic_83xx_config_intrpt(dev, 0);
	}
	return err;
}

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	int ring;

	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_del_rx_ctx(adapter);
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
			qlcnic_fw_cmd_del_tx_ctx(adapter,
						 &adapter->tx_ring[ring]);

		if (qlcnic_83xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
				qlcnic_83xx_config_intrpt(adapter, 0);
		}
		/* Allow dma queues to drain after context reset */
		mdelay(20);
	}
}

void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		if (tx_ring->hw_consumer != NULL) {
			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
					  tx_ring->hw_consumer,
					  tx_ring->hw_cons_phys_addr);

			tx_ring->hw_consumer = NULL;
		}

		if (tx_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  TX_DESC_RINGSIZE(tx_ring),
					  tx_ring->desc_head,
					  tx_ring->phys_addr);
			tx_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  rds_ring->desc_head,
					  rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  sds_ring->desc_head,
					  sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}

int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address %d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Get info of a NIC partition */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t,
					   GFP_KERNEL | __GFP_ZERO);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info %d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure a NIC partition */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t,
					   GFP_KERNEL | __GFP_ZERO);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Get PCI Info of a partition */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
					   &pci_info_dma_t,
					   GFP_KERNEL | __GFP_ZERO);
	if (!pci_info_addr)
		return -ENOMEM;

	npar = pci_info_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info %d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				 u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring for pci func %d on eswitch %d\n",
			pci_func, id);
	else
		dev_info(&adapter->pdev->dev,
			 "Configured eSwitch %d for port mirroring of pci func %d\n",
			 id, pci_func);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

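/*
 * Retrieve eSwitch statistics for a single function or port. Firmware
 * DMAs the counters into a host buffer, which is then converted from
 * little endian into the caller-supplied structure. Non-management
 * functions may only query their own statistics.
 */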
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			  const u8 rx_tx,
			  struct __qlcnic_esw_statistics *esw_stats)
{
	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privileged to query stats for func=%d\n", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t,
					GFP_KERNEL | __GFP_ZERO);
	if (!stats_addr)
		return -ENOMEM;

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
			 struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t,
					GFP_KERNEL | __GFP_ZERO);
	if (!stats_addr)
		return -ENOMEM;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
			le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
			le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
			le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
			le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
			le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
			le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error =
			le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	qlcnic_free_mbx_args(&cmd);

	return err;
}

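/*
 * Aggregate per-function port statistics into one set of counters for the
 * given eSwitch. Each counter starts out as QLCNIC_STATS_NOT_AVAIL and
 * QLCNIC_ADD_ESW_STATS() folds in the values returned by
 * qlcnic_get_port_stats() for every NIC partition on that eSwitch.
 */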
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
			     const u8 rx_tx,
			     struct __qlcnic_esw_statistics *esw_stats)
{
	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
					  rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				     port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				     port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				     port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				     port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				     port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				     port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				     port_stats.numbytes);
		ret = 0;
	}
	return ret;
}

int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
			   const u8 port, const u8 rx_tx)
{
	int err;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;

err_ret:
	dev_err(&adapter->pdev->dev,
		"Invalid args func_esw %d port %d rx_tx %d\n",
		func_esw, port, rx_tx);
	return -EIO;
}

static int
__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
				 u32 *arg1, u32 *arg2)
{
	int err = -EIO;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;

	pci_func = (*arg1 >> 8);

	qlcnic_alloc_mbx_args(&cmd, adapter,
			      QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
	cmd.req.arg[1] = *arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg[1];
	*arg2 = cmd.rsp.arg[2];
	qlcnic_free_mbx_args(&cmd);

	if (err == QLCNIC_RCODE_SUCCESS)
		dev_info(&adapter->pdev->dev,
			 "eSwitch port config for pci func %d\n", pci_func);
	else
		dev_err(&adapter->pdev->dev,
			"Failed to get eswitch port config for pci func %d\n",
			pci_func);
	return err;
}

/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
			      struct qlcnic_esw_func_cfg *esw_cfg)
{
	int err = -EIO, index;
	u32 arg1, arg2 = 0;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;
	pci_func = esw_cfg->pci_func;
	index = qlcnic_is_valid_nic_func(adapter, pci_func);
	if (index < 0)
		return err;
	arg1 = (adapter->npars[index].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"Failed to configure eswitch pci func %d\n", pci_func);
	else
		dev_info(&adapter->pdev->dev,
			 "Configured eSwitch for pci func %d\n", pci_func);

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			       struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	int index;
	u8 phy_port;

	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
		index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
		if (index < 0)
			return -EIO;
		phy_port = adapter->npars[index].phy_port;
	} else {
		phy_port = adapter->ahw->physical_port;
	}
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}